From f412f8e97bfe4040f73392d1025c76bc60216c55 Mon Sep 17 00:00:00 2001 From: Markus Stange Date: Fri, 31 Oct 2025 14:48:09 +0100 Subject: [PATCH 1/7] Use a longer test timeout when debugging with VS code. --- .vscode/launch.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.vscode/launch.json b/.vscode/launch.json index 8771fec1e9..b3d04c25e7 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -8,7 +8,7 @@ "runtimeExecutable": "yarn", "runtimeArgs": ["test"], "program": "${workspaceFolder}/node_modules/jest/bin/jest", - "args": ["--runInBand", "${file}"], + "args": ["--runInBand", "--testTimeout=300000", "${file}"], "cwd": "${workspaceFolder}", "internalConsoleOptions": "openOnSessionStart", "request": "launch", From add8c214f75efd0d75ab888cffeb23b5722eb6b0 Mon Sep 17 00:00:00 2001 From: Markus Stange Date: Fri, 31 Oct 2025 14:48:09 +0100 Subject: [PATCH 2/7] Use Uint8Array instead of ArrayBuffer when parsing binary formats. Uint8Array is more flexible because it can refer to a subrange within a bigger ArrayBuffer. --- src/profile-logic/import/art-trace.ts | 18 +++++++++++------- src/profile-logic/import/simpleperf.ts | 15 +++++++++------ src/profile-logic/process-profile.ts | 17 ++++++++--------- src/utils/magic.ts | 5 +---- 4 files changed, 29 insertions(+), 26 deletions(-) diff --git a/src/profile-logic/import/art-trace.ts b/src/profile-logic/import/art-trace.ts index 4e3d805c6d..5555043438 100644 --- a/src/profile-logic/import/art-trace.ts +++ b/src/profile-logic/import/art-trace.ts @@ -194,12 +194,12 @@ type ArtTrace = { }; function detectArtTraceFormat( - traceBuffer: ArrayBufferLike + traceBuffer: Uint8Array ): 'regular' | 'streaming' | 'unrecognized' { try { const lengthOfExpectedFirstTwoLinesOfSummarySection = '*version\nX\n' .length; - const firstTwoLinesBuffer = traceBuffer.slice( + const firstTwoLinesBuffer = traceBuffer.subarray( 0, lengthOfExpectedFirstTwoLinesOfSummarySection ); @@ -213,7 +213,11 @@ function detectArtTraceFormat( } try { - const dataView = new DataView(traceBuffer); + const dataView = new DataView( + traceBuffer.buffer, + traceBuffer.byteOffset, + traceBuffer.byteLength + ); const magic = dataView.getUint32(0, true); if (magic === TRACE_MAGIC) { return 'streaming'; @@ -523,9 +527,9 @@ function parseStreamingFormat(reader: ByteReader) { }; } -function parseArtTrace(buffer: ArrayBufferLike): ArtTrace { +function parseArtTrace(buffer: Uint8Array): ArtTrace { try { - const reader = new ByteReader(new Uint8Array(buffer)); + const reader = new ByteReader(buffer); switch (detectArtTraceFormat(buffer)) { case 'regular': return parseRegularFormat(reader); @@ -915,13 +919,13 @@ class ThreadBuilder { } } -export function isArtTraceFormat(traceBuffer: ArrayBufferLike) { +export function isArtTraceFormat(traceBuffer: Uint8Array) { return detectArtTraceFormat(traceBuffer) !== 'unrecognized'; } // Convert an ART trace to the Gecko profile format. 
export function convertArtTraceProfile( - traceBuffer: ArrayBufferLike + traceBuffer: Uint8Array ): GeckoProfileVersion11 { const trace = parseArtTrace(traceBuffer); const originalIntervalInUsec = procureSamplingInterval(trace); diff --git a/src/profile-logic/import/simpleperf.ts b/src/profile-logic/import/simpleperf.ts index 52fe0a9831..d6111b93be 100644 --- a/src/profile-logic/import/simpleperf.ts +++ b/src/profile-logic/import/simpleperf.ts @@ -474,13 +474,17 @@ class FirefoxProfile { } export class SimpleperfReportConverter { - buffer: ArrayBufferLike; + buffer: Uint8Array; bufferView: DataView; bufferOffset: number = 0; - constructor(buffer: ArrayBufferLike) { + constructor(buffer: Uint8Array) { this.buffer = buffer; - this.bufferView = new DataView(buffer); + this.bufferView = new DataView( + buffer.buffer, + buffer.byteOffset, + buffer.byteLength + ); } readUint16LE() { @@ -509,11 +513,10 @@ export class SimpleperfReportConverter { } readRecord(recordSize: number): report.Record { - const recordBuffer = this.buffer.slice( + const recordArray = this.buffer.subarray( this.bufferOffset, this.bufferOffset + recordSize ); - const recordArray = new Uint8Array(recordBuffer); this.bufferOffset += recordSize; return report.Record.decode(recordArray); @@ -577,7 +580,7 @@ export class SimpleperfReportConverter { } export function convertSimpleperfTraceProfile( - traceBuffer: ArrayBufferLike + traceBuffer: Uint8Array ): Profile { return new SimpleperfReportConverter(traceBuffer).process(); } diff --git a/src/profile-logic/process-profile.ts b/src/profile-logic/process-profile.ts index 7a363ec58d..1dee88bcb8 100644 --- a/src/profile-logic/process-profile.ts +++ b/src/profile-logic/process-profile.ts @@ -1965,27 +1965,26 @@ export async function unserializeProfileOfArbitraryFormat( // object is constructed from an ArrayBuffer in a different context... which // happens in our tests. if (String(arbitraryFormat) === '[object ArrayBuffer]') { - let arrayBuffer = arbitraryFormat as ArrayBuffer; + const arrayBuffer = arbitraryFormat as ArrayBuffer; // Check for the gzip magic number in the header. If we find it, decompress // the data first. 
- const profileBytes = new Uint8Array(arrayBuffer); + let profileBytes = new Uint8Array(arrayBuffer); if (isGzip(profileBytes)) { - const decompressedProfile = await decompress(profileBytes); - arrayBuffer = decompressedProfile.buffer; + profileBytes = await decompress(profileBytes); } - if (isArtTraceFormat(arrayBuffer)) { - arbitraryFormat = convertArtTraceProfile(arrayBuffer); - } else if (verifyMagic(SIMPLEPERF_MAGIC, arrayBuffer)) { + if (isArtTraceFormat(profileBytes)) { + arbitraryFormat = convertArtTraceProfile(profileBytes); + } else if (verifyMagic(SIMPLEPERF_MAGIC, profileBytes)) { const { convertSimpleperfTraceProfile } = await import( './import/simpleperf' ); - arbitraryFormat = convertSimpleperfTraceProfile(arrayBuffer); + arbitraryFormat = convertSimpleperfTraceProfile(profileBytes); } else { try { const textDecoder = new TextDecoder(undefined, { fatal: true }); - arbitraryFormat = await textDecoder.decode(arrayBuffer); + arbitraryFormat = await textDecoder.decode(profileBytes); } catch (e) { console.error('Source exception:', e); throw new Error( diff --git a/src/utils/magic.ts b/src/utils/magic.ts index abbeb05e59..ca152798f4 100644 --- a/src/utils/magic.ts +++ b/src/utils/magic.ts @@ -1,9 +1,6 @@ export const SIMPLEPERF = 'SIMPLEPERF'; -export function verifyMagic( - magic: string, - traceBuffer: ArrayBufferLike -): boolean { +export function verifyMagic(magic: string, traceBuffer: Uint8Array): boolean { return ( new TextDecoder('utf8').decode(traceBuffer.slice(0, magic.length)) === magic ); From b2280bf768c30babd9b9ebe9318a1e411c270339 Mon Sep 17 00:00:00 2001 From: Markus Stange Date: Fri, 31 Oct 2025 14:48:09 +0100 Subject: [PATCH 3/7] Accept Uint8Array in profile parsing in addition to ArrayBuffer. This is a more natural fit and avoids the copy in symbolicator-cli. --- src/profile-logic/process-profile.ts | 9 ++++++- src/symbolicator-cli/index.ts | 9 +------ src/test/unit/profile-conversion.test.ts | 30 +++++++++++++----------- 3 files changed, 25 insertions(+), 23 deletions(-) diff --git a/src/profile-logic/process-profile.ts b/src/profile-logic/process-profile.ts index 1dee88bcb8..13c0ae7462 100644 --- a/src/profile-logic/process-profile.ts +++ b/src/profile-logic/process-profile.ts @@ -1966,10 +1966,17 @@ export async function unserializeProfileOfArbitraryFormat( // happens in our tests. if (String(arbitraryFormat) === '[object ArrayBuffer]') { const arrayBuffer = arbitraryFormat as ArrayBuffer; + arbitraryFormat = new Uint8Array(arrayBuffer); + } + // Handle binary formats. + if ( + arbitraryFormat instanceof Uint8Array || + (globalThis.Buffer && arbitraryFormat instanceof globalThis.Buffer) + ) { // Check for the gzip magic number in the header. If we find it, decompress // the data first. - let profileBytes = new Uint8Array(arrayBuffer); + let profileBytes = arbitraryFormat as Uint8Array; if (isGzip(profileBytes)) { profileBytes = await decompress(profileBytes); } diff --git a/src/symbolicator-cli/index.ts b/src/symbolicator-cli/index.ts index cf0ff1bb21..2e917c7a52 100644 --- a/src/symbolicator-cli/index.ts +++ b/src/symbolicator-cli/index.ts @@ -85,15 +85,8 @@ export async function run(options: CliOptions) { // by our importers. const bytes = fs.readFileSync(options.input, null); - // bytes is a Uint8Array whose underlying ArrayBuffer can be longer than bytes.length. - // Copy the contents into a new ArrayBuffer which is sized correctly, so that we - // don't include uninitialized data from the extra parts of the underlying buffer. 
- // Alternatively, we could make unserializeProfileOfArbitraryFormat support - // Uint8Array or Buffer in addition to ArrayBuffer. - const byteBufferCopy = Uint8Array.prototype.slice.call(bytes).buffer; - // Load the profile. - const profile = await unserializeProfileOfArbitraryFormat(byteBufferCopy); + const profile = await unserializeProfileOfArbitraryFormat(bytes); if (profile === undefined) { throw new Error('Unable to parse the profile.'); } diff --git a/src/test/unit/profile-conversion.test.ts b/src/test/unit/profile-conversion.test.ts index 35271e7e2d..9cd61ce93f 100644 --- a/src/test/unit/profile-conversion.test.ts +++ b/src/test/unit/profile-conversion.test.ts @@ -245,9 +245,8 @@ describe('converting Google Chrome profile', function () { 'src/test/fixtures/upgrades/chrome-tracing.json.gz' ); const decompressedBuffer = zlib.gunzipSync(compressedBuffer); - const profile = await unserializeProfileOfArbitraryFormat( - decompressedBuffer.buffer - ); + const profile = + await unserializeProfileOfArbitraryFormat(decompressedBuffer); if (profile === undefined) { throw new Error('Unable to parse the profile.'); } @@ -263,9 +262,8 @@ describe('converting Google Chrome profile', function () { 'src/test/fixtures/upgrades/chrome-trace-issue-5429.json.gz' ); const decompressedBuffer = zlib.gunzipSync(compressedBuffer); - const profile = await unserializeProfileOfArbitraryFormat( - decompressedBuffer.buffer - ); + const profile = + await unserializeProfileOfArbitraryFormat(decompressedBuffer); if (profile === undefined) { throw new Error('Unable to parse the profile.'); } @@ -425,8 +423,9 @@ describe('converting ART trace', function () { const buffer = fs.readFileSync( 'src/test/fixtures/upgrades/art-trace-regular.trace.gz' ); - const arrayBuffer = zlib.gunzipSync(buffer).buffer; - const profile = await unserializeProfileOfArbitraryFormat(arrayBuffer); + const uncompressedBytes = zlib.gunzipSync(buffer); + const profile = + await unserializeProfileOfArbitraryFormat(uncompressedBytes); if (profile === undefined) { throw new Error('Unable to parse the profile.'); } @@ -440,8 +439,9 @@ describe('converting ART trace', function () { const buffer = fs.readFileSync( 'src/test/fixtures/upgrades/art-trace-streaming.trace.gz' ); - const arrayBuffer = zlib.gunzipSync(buffer).buffer; - const profile = await unserializeProfileOfArbitraryFormat(arrayBuffer); + const uncompressedBytes = zlib.gunzipSync(buffer); + const profile = + await unserializeProfileOfArbitraryFormat(uncompressedBytes); if (profile === undefined) { throw new Error('Unable to parse the profile.'); } @@ -457,8 +457,9 @@ describe('converting Simpleperf trace', function () { const buffer = fs.readFileSync( 'src/test/fixtures/upgrades/simpleperf-task-clock.trace.gz' ); - const arrayBuffer = zlib.gunzipSync(buffer).buffer; - const profile = await unserializeProfileOfArbitraryFormat(arrayBuffer); + const uncompressedBytes = zlib.gunzipSync(buffer); + const profile = + await unserializeProfileOfArbitraryFormat(uncompressedBytes); if (profile === undefined) { throw new Error('Unable to parse the profile.'); } @@ -472,8 +473,9 @@ describe('converting Simpleperf trace', function () { const buffer = fs.readFileSync( 'src/test/fixtures/upgrades/simpleperf-cpu-clock.trace.gz' ); - const arrayBuffer = zlib.gunzipSync(buffer).buffer; - const profile = await unserializeProfileOfArbitraryFormat(arrayBuffer); + const uncompressedBytes = zlib.gunzipSync(buffer); + const profile = + await unserializeProfileOfArbitraryFormat(uncompressedBytes); if (profile 
=== undefined) { throw new Error('Unable to parse the profile.'); } From 850eb1de2e3eb1384495c87fe4c3912a1c361518 Mon Sep 17 00:00:00 2001 From: Markus Stange Date: Mon, 3 Nov 2025 13:10:54 -0500 Subject: [PATCH 4/7] Move Jest config from package.json to jest.config.js. --- jest.config.js | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++ package.json | 33 --------------------------- 2 files changed, 60 insertions(+), 33 deletions(-) create mode 100644 jest.config.js diff --git a/jest.config.js b/jest.config.js new file mode 100644 index 0000000000..8c54951c30 --- /dev/null +++ b/jest.config.js @@ -0,0 +1,60 @@ +/** + * Jest configuration with two separate test projects: + * 1. Browser tests (src/) - React/browser environment tests + * 2. CLI tests (cli-tests/) - Node.js CLI integration tests + */ + +module.exports = { + projects: [ + // ======================================================================== + // Browser Tests (React/browser environment) + // ======================================================================== + { + displayName: 'browser', + testMatch: ['/src/**/*.test.{js,jsx,ts,tsx}'], + + // Use custom jsdom environment for browser/React testing + testEnvironment: './src/test/custom-environment', + + // Setup files that run after the test framework is installed + setupFilesAfterEnv: [ + 'jest-extended/all', // Extended matchers like toBeNumber() + './src/test/setup.ts', // Browser-specific test setup + ], + + // Coverage collection (for browser tests only) + collectCoverageFrom: [ + 'src/**/*.{js,jsx,ts,tsx}', + '!**/node_modules/**', + '!src/types/libdef/**', + ], + + // File extensions to consider + moduleFileExtensions: ['js', 'jsx', 'ts', 'tsx'], + + // Transform ESM modules to CommonJS for Jest + // These packages ship as pure ESM and need to be transformed by Babel + transformIgnorePatterns: [ + '/node_modules/(?!(query-string|decode-uri-component|iongraph-web|split-on-first|filter-obj|fetch-mock)/)', + ], + + // Mock static assets (images, CSS, etc.) 
in browser tests + moduleNameMapper: { + '\\.(jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga|ftl)$': + '/src/test/fixtures/mocks/file-mock.ts', + '\\.(css|less)$': '/src/test/fixtures/mocks/style-mock.ts', + }, + + // Global variables available in tests + globals: { + AVAILABLE_STAGING_LOCALES: null, + }, + + // Snapshot formatting + snapshotFormat: { + escapeString: true, + printBasicPrototype: true, + }, + }, + ], +}; diff --git a/package.json b/package.json index 0c070b2d35..d0c60847c9 100644 --- a/package.json +++ b/package.json @@ -193,39 +193,6 @@ "@types/react-splitter-layout/@types/react": "^18.3.26", "@types/trusted-types": "^2.0.7" }, - "jest": { - "collectCoverageFrom": [ - "src/**/*.{js,jsx,ts,tsx}", - "!**/node_modules/**", - "!src/types/libdef/**" - ], - "moduleFileExtensions": [ - "js", - "jsx", - "ts", - "tsx" - ], - "transformIgnorePatterns": [ - "/node_modules/(?!(query-string|decode-uri-component|iongraph-web|split-on-first|filter-obj|fetch-mock)/)" - ], - "moduleNameMapper": { - "\\.(jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga|ftl)$": "/src/test/fixtures/mocks/file-mock.ts", - "\\.(css|less)$": "/src/test/fixtures/mocks/style-mock.ts" - }, - "setupFilesAfterEnv": [ - "jest-extended/all", - "./src/test/setup.ts" - ], - "globals": { - "AVAILABLE_STAGING_LOCALES": null - }, - "snapshotFormat": { - "escapeString": true, - "printBasicPrototype": true - }, - "testEnvironment": "./src/test/custom-environment", - "verbose": false - }, "husky": { "hooks": { "post-checkout": "node bin/post-checkout.js", From 520d31a5ebad895bbeb994db13c6c110db23f78f Mon Sep 17 00:00:00 2001 From: Markus Stange Date: Mon, 3 Nov 2025 10:23:23 -0500 Subject: [PATCH 5/7] Move some profile fetching code into a separate module. This logic can be useful independently of redux actions, for CLI scripts which want to support profiler URLs. --- src/actions/receive-profile.ts | 287 +-------------- src/test/components/Root-history.test.tsx | 2 +- .../receive-profile.test.ts.snap | 300 --------------- src/test/store/receive-profile.test.ts | 189 +--------- .../__snapshots__/profile-fetch.test.ts.snap | 301 +++++++++++++++ src/test/unit/profile-fetch.test.ts | 207 +++++++++++ src/utils/profile-fetch.ts | 346 ++++++++++++++++++ 7 files changed, 872 insertions(+), 760 deletions(-) create mode 100644 src/test/unit/__snapshots__/profile-fetch.test.ts.snap create mode 100644 src/test/unit/profile-fetch.test.ts create mode 100644 src/utils/profile-fetch.ts diff --git a/src/actions/receive-profile.ts b/src/actions/receive-profile.ts index 0bd60fb0ec..d316a772f9 100644 --- a/src/actions/receive-profile.ts +++ b/src/actions/receive-profile.ts @@ -2,6 +2,13 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ import { oneLine } from 'common-tags'; +import { + fetchProfile, + getProfileUrlForHash, + type ProfileOrZip, + deduceContentType, + extractJsonFromArrayBuffer, +} from 'firefox-profiler/utils/profile-fetch'; import queryString from 'query-string'; import type JSZip from 'jszip'; import { @@ -17,10 +24,8 @@ import { } from 'firefox-profiler/profile-logic/symbolication'; import * as MozillaSymbolicationAPI from 'firefox-profiler/profile-logic/mozilla-symbolication-api'; import { mergeProfilesForDiffing } from 'firefox-profiler/profile-logic/merge-compare'; -import { decompress, isGzip } from 'firefox-profiler/utils/gz'; import { expandUrl } from 'firefox-profiler/utils/shorten-url'; import { TemporaryError } from 'firefox-profiler/utils/errors'; -import { isLocalURL } from 'firefox-profiler/utils/url'; import { getSelectedThreadIndexesOrNull, getGlobalTrackOrder, @@ -64,7 +69,6 @@ import { import { setDataSource } from './profile-view'; import { fatalError } from './errors'; import { batchLoadDataUrlIcons } from './icons'; -import { GOOGLE_STORAGE_BUCKET } from 'firefox-profiler/app-logic/constants'; import { determineTimelineType, hasUsefulSamples, @@ -553,7 +557,7 @@ async function _unpackGeckoProfileFromBrowser( // global. This happens especially with tests but could happen in the future // in Firefox too. if (Object.prototype.toString.call(profile) === '[object ArrayBuffer]') { - return _extractJsonFromArrayBuffer(profile as ArrayBuffer); + return extractJsonFromArrayBuffer(profile as ArrayBuffer); } return profile; } @@ -563,9 +567,9 @@ function getSymbolStore( symbolServerUrl: string, browserConnection: BrowserConnection | null ): SymbolStore | null { - if (!window.indexedDB) { - // We could be running in a test environment with no indexedDB support. Do not - // return a symbol store in this case. + if (typeof window === 'undefined' || !window.indexedDB) { + // We could be running in a test environment or Node.js with no indexedDB support. + // Do not return a symbol store in this case. return null; } @@ -870,265 +874,6 @@ export function temporaryError(error: TemporaryError): Action { }; } -function _wait(delayMs: number): Promise { - return new Promise((resolve) => setTimeout(resolve, delayMs)); -} - -function _loadProbablyFailedDueToSafariLocalhostHTTPRestriction( - url: string, - error: Error -): boolean { - if (!navigator.userAgent.match(/Safari\/\d+\.\d+/)) { - return false; - } - // Check if Safari considers this mixed content. - const parsedUrl = new URL(url); - return ( - error.name === 'TypeError' && - parsedUrl.protocol === 'http:' && - isLocalURL(parsedUrl) && - location.protocol === 'https:' - ); -} - -class SafariLocalhostHTTPLoadError extends Error { - override name = 'SafariLocalhostHTTPLoadError'; -} - -type FetchProfileArgs = { - url: string; - onTemporaryError: (param: TemporaryError) => void; - // Allow tests to capture the reported error, but normally use console.error. - reportError?: (...data: Array) => void; -}; - -type ProfileOrZip = - | { responseType: 'PROFILE'; profile: unknown } - | { responseType: 'ZIP'; zip: JSZip }; - -/** - * Tries to fetch a profile on `url`. If the profile is not found, - * `onTemporaryError` is called with an appropriate error, we wait 1 second, and - * then tries again. If we still can't find the profile after 11 tries, the - * returned promise is rejected with a fatal error. - * If we can retrieve the profile properly, the returned promise is resolved - * with the JSON.parsed profile. 
- */ -export async function _fetchProfile( - args: FetchProfileArgs -): Promise { - const MAX_WAIT_SECONDS = 10; - let i = 0; - const { url, onTemporaryError } = args; - // Allow tests to capture the reported error, but normally use console.error. - const reportError = args.reportError || console.error; - - while (true) { - let response; - try { - response = await fetch(url); - } catch (e) { - // Case 1: Exception. - if (_loadProbablyFailedDueToSafariLocalhostHTTPRestriction(url, e)) { - throw new SafariLocalhostHTTPLoadError(); - } - throw e; - } - - // Case 2: successful answer. - if (response.ok) { - return _extractProfileOrZipFromResponse(url, response, reportError); - } - - // case 3: unrecoverable error. - if (response.status !== 403) { - throw new Error(oneLine` - Could not fetch the profile on remote server. - Response was: ${response.status} ${response.statusText}. - `); - } - - // case 4: 403 errors can be transient while a profile is uploaded. - - if (i++ === MAX_WAIT_SECONDS) { - // In the last iteration we don't send a temporary error because we'll - // throw an error right after the while loop. - break; - } - - onTemporaryError( - new TemporaryError( - 'Profile not found on remote server.', - { count: i, total: MAX_WAIT_SECONDS + 1 } // 11 tries during 10 seconds - ) - ); - - await _wait(1000); - } - - throw new Error(oneLine` - Could not fetch the profile on remote server: - still not found after ${MAX_WAIT_SECONDS} seconds. - `); -} - -/** - * Deduce the file type from a url and content type. Third parties can give us - * arbitrary information, so make sure that we try out best to extract the proper - * information about it. - */ -function _deduceContentType( - url: string, - contentType: string | null -): 'application/json' | 'application/zip' | null { - if (contentType === 'application/zip' || contentType === 'application/json') { - return contentType; - } - if (url.match(/\.zip$/)) { - return 'application/zip'; - } - if (url.match(/\.json/)) { - return 'application/json'; - } - return null; -} - -/** - * This function guesses the correct content-type (even if one isn't sent) and then - * attempts to use the proper method to extract the response. - */ -async function _extractProfileOrZipFromResponse( - url: string, - response: Response, - reportError: (...data: Array) => void -): Promise { - const contentType = _deduceContentType( - url, - response.headers.get('content-type') - ); - switch (contentType) { - case 'application/zip': - return { - responseType: 'ZIP', - zip: await _extractZipFromResponse(response, reportError), - }; - case 'application/json': - case null: - // The content type is null if it is unknown, or an unsupported type. Go ahead - // and try to process it as a profile. - return { - responseType: 'PROFILE', - profile: await _extractJsonFromResponse( - response, - reportError, - contentType - ), - }; - default: - throw assertExhaustiveCheck(contentType); - } -} - -/** - * Attempt to load a zip file from a third party. This process can fail, so make sure - * to handle and report the error if it does. - */ -async function _extractZipFromResponse( - response: Response, - reportError: (...data: Array) => void -): Promise { - const buffer = await response.arrayBuffer(); - // Workaround for https://github.com/Stuk/jszip/issues/941 - // When running this code in tests, `buffer` doesn't inherits from _this_ - // realm's ArrayBuffer object, and this breaks JSZip which doesn't account for - // this case. 
We workaround the issue by wrapping the buffer in an Uint8Array - // that comes from this realm. - const typedBuffer = new Uint8Array(buffer); - try { - const JSZip = await import('jszip'); - const zip = await JSZip.loadAsync(typedBuffer); - // Catch the error if unable to load the zip. - return zip; - } catch (error) { - const message = 'Unable to open the archive file.'; - reportError(message); - reportError('Error:', error); - reportError('Fetch response:', response); - throw new Error( - `${message} The full error information has been printed out to the DevTool’s console.` - ); - } -} - -/** - * Parse JSON from an optionally gzipped array buffer. - */ -async function _extractJsonFromArrayBuffer( - arrayBuffer: ArrayBuffer -): Promise { - let profileBytes = new Uint8Array(arrayBuffer); - // Check for the gzip magic number in the header. - if (isGzip(profileBytes)) { - profileBytes = await decompress(profileBytes); - } - - const textDecoder = new TextDecoder(); - return JSON.parse(textDecoder.decode(profileBytes)); -} - -/** - * Don't trust third party responses, try and handle a variety of responses gracefully. - */ -async function _extractJsonFromResponse( - response: Response, - reportError: (...data: Array) => void, - fileType: 'application/json' | null -): Promise { - let arrayBuffer: ArrayBuffer | null = null; - try { - // await before returning so that we can catch JSON parse errors. - arrayBuffer = await response.arrayBuffer(); - return await _extractJsonFromArrayBuffer(arrayBuffer); - } catch (error) { - // Change the error message depending on the circumstance: - let message; - if (error && typeof error === 'object' && error.name === 'AbortError') { - message = 'The network request to load the profile was aborted.'; - } else if (fileType === 'application/json') { - message = 'The profile’s JSON could not be decoded.'; - } else if (fileType === null && arrayBuffer !== null) { - // If the content type is not specified, use a raw array buffer - // to fallback to other supported profile formats. - return arrayBuffer; - } else { - message = oneLine` - The profile could not be downloaded and decoded. This does not look like a supported file - type. - `; - } - - // Provide helpful debugging information to the console. - reportError(message); - reportError('JSON parsing error:', error); - reportError('Fetch response:', response); - - throw new Error( - `${message} The full error information has been printed out to the DevTool’s console.` - ); - } -} - -export function getProfileUrlForHash(hash: string): string { - // See https://cloud.google.com/storage/docs/access-public-data - // The URL is https://storage.googleapis.com//. - // https://.storage.googleapis.com/ seems to also work but - // is not documented nowadays. - - // By convention, "profile-store" is the name of our bucket, and the file path - // is the hash we receive in the URL. 
- return `https://storage.googleapis.com/${GOOGLE_STORAGE_BUCKET}/${hash}`; -} - export function retrieveProfileFromStore( hash: string, initialLoad: boolean = false @@ -1149,7 +894,7 @@ export function retrieveProfileOrZipFromUrl( dispatch(waitingForProfileFromUrl(profileUrl)); try { - const response: ProfileOrZip = await _fetchProfile({ + const response: ProfileOrZip = await fetchProfile({ url: profileUrl, onTemporaryError: (e: TemporaryError) => { dispatch(temporaryError(e)); @@ -1178,7 +923,7 @@ export function retrieveProfileOrZipFromUrl( default: throw assertExhaustiveCheck( response as never, - 'Expected to receive an archive or profile from _fetchProfile.' + 'Expected to receive an archive or profile from fetchProfile.' ); } } catch (error) { @@ -1233,7 +978,7 @@ export function retrieveProfileFromFile( dispatch(waitingForProfileFromFile()); try { - if (_deduceContentType(file.name, file.type) === 'application/zip') { + if (deduceContentType(file.name, file.type) === 'application/zip') { // Open a zip file in the zip file viewer const buffer = await fileReader(file).asArrayBuffer(); const JSZip = await import('jszip'); @@ -1332,14 +1077,14 @@ export function retrieveProfilesToCompare( 'Only public uploaded profiles are supported by the comparison function.' ); } - const response: ProfileOrZip = await _fetchProfile({ + const response: ProfileOrZip = await fetchProfile({ url: profileUrl, onTemporaryError: (e: TemporaryError) => { dispatch(temporaryError(e)); }, }); if (response.responseType !== 'PROFILE') { - throw new Error('Expected to receive a profile from _fetchProfile'); + throw new Error('Expected to receive a profile from fetchProfile'); } const serializedProfile = response.profile; diff --git a/src/test/components/Root-history.test.tsx b/src/test/components/Root-history.test.tsx index e2c34a6bf2..13a130aa71 100644 --- a/src/test/components/Root-history.test.tsx +++ b/src/test/components/Root-history.test.tsx @@ -11,7 +11,7 @@ import { import { Root } from '../../components/app/Root'; import { autoMockCanvasContext } from '../fixtures/mocks/canvas-context'; import { fireFullClick } from '../fixtures/utils'; -import { getProfileUrlForHash } from '../../actions/receive-profile'; +import { getProfileUrlForHash } from '../../utils/profile-fetch'; import { blankStore } from '../fixtures/stores'; import { getProfileFromTextSamples } from '../fixtures/profiles/processed-profile'; import { diff --git a/src/test/store/__snapshots__/receive-profile.test.ts.snap b/src/test/store/__snapshots__/receive-profile.test.ts.snap index ac9c30b543..02c2f06f5a 100644 --- a/src/test/store/__snapshots__/receive-profile.test.ts.snap +++ b/src/test/store/__snapshots__/receive-profile.test.ts.snap @@ -1,305 +1,5 @@ // Jest Snapshot v1, https://jestjs.io/docs/snapshot-testing -exports[`actions/receive-profile _fetchProfile fails if a bad profile JSON is passed in 1`] = `[Error: The profile’s JSON could not be decoded. 
The full error information has been printed out to the DevTool’s console.]`; - -exports[`actions/receive-profile _fetchProfile fails if a bad profile JSON is passed in 2`] = ` -Array [ - Array [ - "The profile’s JSON could not be decoded.", - ], - Array [ - "JSON parsing error:", - [SyntaxError: Unexpected token 'i', "invalid" is not valid JSON], - ], - Array [ - "Fetch response:", - Response { - Symbol(state): Object { - "aborted": false, - "body": Object { - "length": 7, - "source": Uint8Array [], - "stream": ReadableStream { - Symbol(kType): "ReadableStream", - Symbol(kState): Object { - "controller": ReadableByteStreamController { - Symbol(kType): "ReadableByteStreamController", - Symbol(kState): Object { - "autoAllocateChunkSize": undefined, - "byobRequest": null, - "cancelAlgorithm": undefined, - "closeRequested": false, - "highWaterMark": 0, - "pendingPullIntos": Array [], - "pullAgain": false, - "pullAlgorithm": undefined, - "pulling": false, - "queue": Array [], - "queueTotalSize": 0, - "started": true, - "stream": [Circular], - }, - }, - "disturbed": true, - "reader": ReadableStreamDefaultReader { - Symbol(kType): "ReadableStreamDefaultReader", - Symbol(kState): Object { - "close": Object { - "promise": Promise {}, - "reject": [Function], - "resolve": [Function], - }, - "readRequests": Array [], - "stream": [Circular], - }, - }, - "state": "closed", - "storedError": undefined, - "transfer": Object { - "port1": undefined, - "port2": undefined, - "promise": undefined, - "writable": undefined, - }, - }, - Symbol(nodejs.webstream.isClosedPromise): Object { - "promise": Promise {}, - "reject": [Function], - "resolve": [Function], - }, - Symbol(nodejs.webstream.controllerErrorFunction): [Function], - }, - }, - "cacheState": "", - "headersList": HeadersList { - "cookies": null, - Symbol(headers map): Map { - "content-length" => Object { - "name": "content-length", - "value": "7", - }, - "content-type" => Object { - "name": "content-type", - "value": "application/json", - }, - }, - Symbol(headers map sorted): null, - }, - "rangeRequested": false, - "requestIncludesCredentials": false, - "status": 200, - "statusText": "OK", - "timingAllowPassed": false, - "timingInfo": null, - "type": "default", - "urlList": Array [], - }, - Symbol(headers): Headers {}, - }, - ], -] -`; - -exports[`actions/receive-profile _fetchProfile fails if a bad profile JSON is passed in, with no content type 1`] = `[Error: The profile’s JSON could not be decoded. 
The full error information has been printed out to the DevTool’s console.]`; - -exports[`actions/receive-profile _fetchProfile fails if a bad profile JSON is passed in, with no content type 2`] = ` -Array [ - Array [ - "The profile’s JSON could not be decoded.", - ], - Array [ - "JSON parsing error:", - [SyntaxError: Unexpected token 'i', "invalid" is not valid JSON], - ], - Array [ - "Fetch response:", - Response { - Symbol(state): Object { - "aborted": false, - "body": Object { - "length": 7, - "source": Uint8Array [], - "stream": ReadableStream { - Symbol(kType): "ReadableStream", - Symbol(kState): Object { - "controller": ReadableByteStreamController { - Symbol(kType): "ReadableByteStreamController", - Symbol(kState): Object { - "autoAllocateChunkSize": undefined, - "byobRequest": null, - "cancelAlgorithm": undefined, - "closeRequested": false, - "highWaterMark": 0, - "pendingPullIntos": Array [], - "pullAgain": false, - "pullAlgorithm": undefined, - "pulling": false, - "queue": Array [], - "queueTotalSize": 0, - "started": true, - "stream": [Circular], - }, - }, - "disturbed": true, - "reader": ReadableStreamDefaultReader { - Symbol(kType): "ReadableStreamDefaultReader", - Symbol(kState): Object { - "close": Object { - "promise": Promise {}, - "reject": [Function], - "resolve": [Function], - }, - "readRequests": Array [], - "stream": [Circular], - }, - }, - "state": "closed", - "storedError": undefined, - "transfer": Object { - "port1": undefined, - "port2": undefined, - "promise": undefined, - "writable": undefined, - }, - }, - Symbol(nodejs.webstream.isClosedPromise): Object { - "promise": Promise {}, - "reject": [Function], - "resolve": [Function], - }, - Symbol(nodejs.webstream.controllerErrorFunction): [Function], - }, - }, - "cacheState": "", - "headersList": HeadersList { - "cookies": null, - Symbol(headers map): Map { - "content-length" => Object { - "name": "content-length", - "value": "7", - }, - "content-type" => Object { - "name": "content-type", - "value": "undefined", - }, - }, - Symbol(headers map sorted): null, - }, - "rangeRequested": false, - "requestIncludesCredentials": false, - "status": 200, - "statusText": "OK", - "timingAllowPassed": false, - "timingInfo": null, - "type": "default", - "urlList": Array [], - }, - Symbol(headers): Headers {}, - }, - ], -] -`; - -exports[`actions/receive-profile _fetchProfile fails if a bad zip file is passed in 1`] = `[Error: Unable to open the archive file. The full error information has been printed out to the DevTool’s console.]`; - -exports[`actions/receive-profile _fetchProfile fails if a bad zip file is passed in 2`] = ` -Array [ - Array [ - "Unable to open the archive file.", - ], - Array [ - "Error:", - [Error: Can't find end of central directory : is this a zip file ? 
If it is, see https://stuk.github.io/jszip/documentation/howto/read_zip.html], - ], - Array [ - "Fetch response:", - Response { - Symbol(state): Object { - "aborted": false, - "body": Object { - "length": 4, - "source": Uint8Array [], - "stream": ReadableStream { - Symbol(kType): "ReadableStream", - Symbol(kState): Object { - "controller": ReadableByteStreamController { - Symbol(kType): "ReadableByteStreamController", - Symbol(kState): Object { - "autoAllocateChunkSize": undefined, - "byobRequest": null, - "cancelAlgorithm": undefined, - "closeRequested": false, - "highWaterMark": 0, - "pendingPullIntos": Array [], - "pullAgain": false, - "pullAlgorithm": undefined, - "pulling": false, - "queue": Array [], - "queueTotalSize": 0, - "started": true, - "stream": [Circular], - }, - }, - "disturbed": true, - "reader": ReadableStreamDefaultReader { - Symbol(kType): "ReadableStreamDefaultReader", - Symbol(kState): Object { - "close": Object { - "promise": Promise {}, - "reject": [Function], - "resolve": [Function], - }, - "readRequests": Array [], - "stream": [Circular], - }, - }, - "state": "closed", - "storedError": undefined, - "transfer": Object { - "port1": undefined, - "port2": undefined, - "promise": undefined, - "writable": undefined, - }, - }, - Symbol(nodejs.webstream.isClosedPromise): Object { - "promise": Promise {}, - "reject": [Function], - "resolve": [Function], - }, - Symbol(nodejs.webstream.controllerErrorFunction): [Function], - }, - }, - "cacheState": "", - "headersList": HeadersList { - "cookies": null, - Symbol(headers map): Map { - "content-length" => Object { - "name": "content-length", - "value": "4", - }, - "content-type" => Object { - "name": "content-type", - "value": "application/zip", - }, - }, - Symbol(headers map sorted): null, - }, - "rangeRequested": false, - "requestIncludesCredentials": false, - "status": 200, - "statusText": "OK", - "timingAllowPassed": false, - "timingInfo": null, - "type": "default", - "urlList": Array [], - }, - Symbol(headers): Headers {}, - }, - ], -] -`; - exports[`actions/receive-profile retrieveProfileFromFile will be an error to view a profile with no threads 1`] = `"No threads were captured in this profile, there is nothing to display."`; exports[`actions/receive-profile retrieveProfileFromFile will give an error when unable to decompress a zipped profile 1`] = `[Error: Can't find end of central directory : is this a zip file ? If it is, see https://stuk.github.io/jszip/documentation/howto/read_zip.html]`; diff --git a/src/test/store/receive-profile.test.ts b/src/test/store/receive-profile.test.ts index bd0d50cdea..f825ca1aed 100644 --- a/src/test/store/receive-profile.test.ts +++ b/src/test/store/receive-profile.test.ts @@ -27,9 +27,9 @@ import { retrieveProfileOrZipFromUrl, retrieveProfileFromFile, retrieveProfilesToCompare, - _fetchProfile, retrieveProfileForRawUrl, } from '../../actions/receive-profile'; +import { fetchProfile as _fetchProfile } from '../../utils/profile-fetch'; import { SymbolsNotFoundError } from '../../profile-logic/errors'; import { createGeckoProfile } from '../fixtures/profiles/gecko-profile'; @@ -1144,193 +1144,6 @@ describe('actions/receive-profile', function () { }); }); - /** - * _fetchProfile is a helper function for the actions, but it is tested separately - * since it has a decent amount of complexity around different issues with loading - * in different support URL formats. It's mainly testing what happens when JSON - * and zip file is sent, and what happens when things fail. 
- */ - describe('_fetchProfile', function () { - /** - * This helper function encapsulates various configurations for the type of content - * as well and response headers. - */ - async function configureFetch(obj: { - url: string; - contentType?: string; - content: 'generated-zip' | 'generated-json' | Uint8Array; - }) { - const { url, contentType, content } = obj; - const stringProfile = serializeProfile(_getSimpleProfile()); - const profile = JSON.parse(stringProfile); - let arrayBuffer; - - switch (content) { - case 'generated-zip': { - const zip = new JSZip(); - zip.file('profile.json', stringProfile); - arrayBuffer = await zip.generateAsync({ type: 'uint8array' }); - break; - } - case 'generated-json': - arrayBuffer = encode(stringProfile); - break; - default: - arrayBuffer = content; - break; - } - - window.fetchMock.catch(403).get(url, { - body: arrayBuffer, - headers: { - 'content-type': contentType, - }, - }); - - const reportError = jest.fn(); - const args = { - url, - onTemporaryError: () => {}, - reportError, - }; - - // Return fetch's args, based on the inputs. - return { profile, args, reportError }; - } - - it('fetches a normal profile with the correct content-type headers', async function () { - const { profile, args } = await configureFetch({ - url: 'https://example.com/profile.json', - contentType: 'application/json', - content: 'generated-json', - }); - - const profileOrZip = await _fetchProfile(args); - expect(profileOrZip).toEqual({ responseType: 'PROFILE', profile }); - }); - - it('fetches a zipped profile with correct content-type headers', async function () { - const { args, reportError } = await configureFetch({ - url: 'https://example.com/profile.zip', - contentType: 'application/zip', - content: 'generated-zip', - }); - - const profileOrZip = await _fetchProfile(args); - expect(profileOrZip.responseType).toBe('ZIP'); - expect(reportError.mock.calls.length).toBe(0); - }); - - it('fetches a zipped profile with incorrect content-type headers, but .zip extension', async function () { - const { args, reportError } = await configureFetch({ - url: 'https://example.com/profile.zip', - content: 'generated-zip', - }); - - const profileOrZip = await _fetchProfile(args); - expect(profileOrZip.responseType).toBe('ZIP'); - expect(reportError.mock.calls.length).toBe(0); - }); - - it('fetches a profile with incorrect content-type headers, but .json extension', async function () { - const { profile, args, reportError } = await configureFetch({ - url: 'https://example.com/profile.json', - content: 'generated-json', - }); - - const profileOrZip = await _fetchProfile(args); - expect(profileOrZip).toEqual({ responseType: 'PROFILE', profile }); - expect(reportError.mock.calls.length).toBe(0); - }); - - it('fetches a profile with incorrect content-type headers, no known extension, and attempts to JSON parse it it', async function () { - const { profile, args, reportError } = await configureFetch({ - url: 'https://example.com/profile.file', - content: 'generated-json', - }); - - const profileOrZip = await _fetchProfile(args); - expect(profileOrZip).toEqual({ responseType: 'PROFILE', profile }); - expect(reportError.mock.calls.length).toBe(0); - }); - - it('fails if a bad zip file is passed in', async function () { - const { args, reportError } = await configureFetch({ - url: 'https://example.com/profile.file', - contentType: 'application/zip', - content: new Uint8Array([0, 1, 2, 3]), - }); - - let userFacingError; - try { - await _fetchProfile(args); - } catch (error) { - userFacingError 
= error; - } - expect(userFacingError).toMatchSnapshot(); - expect(reportError.mock.calls.length).toBeGreaterThan(0); - expect(reportError.mock.calls).toMatchSnapshot(); - }); - - it('fails if a bad profile JSON is passed in', async function () { - const invalidJSON = 'invalid'; - const { args, reportError } = await configureFetch({ - url: 'https://example.com/profile.json', - contentType: 'application/json', - content: encode(invalidJSON), - }); - - let userFacingError; - try { - await _fetchProfile(args); - } catch (error) { - userFacingError = error; - } - expect(userFacingError).toMatchSnapshot(); - expect(reportError.mock.calls.length).toBeGreaterThan(0); - expect(reportError.mock.calls).toMatchSnapshot(); - }); - - it('fails if a bad profile JSON is passed in, with no content type', async function () { - const invalidJSON = 'invalid'; - const { args, reportError } = await configureFetch({ - url: 'https://example.com/profile.json', - content: encode(invalidJSON), - }); - - let userFacingError; - try { - await _fetchProfile(args); - } catch (error) { - userFacingError = error; - } - expect(userFacingError).toMatchSnapshot(); - expect(reportError.mock.calls.length).toBeGreaterThan(0); - expect(reportError.mock.calls).toMatchSnapshot(); - }); - - it('fallback behavior if a completely unknown file is passed in', async function () { - const invalidJSON = 'invalid'; - const profile = encode(invalidJSON); - const { args } = await configureFetch({ - url: 'https://example.com/profile.unknown', - content: profile, - }); - - let userFacingError = null; - try { - const profileOrZip = await _fetchProfile(args); - expect(profileOrZip).toEqual({ - responseType: 'PROFILE', - profile: profile.buffer, - }); - } catch (error) { - userFacingError = error; - } - expect(userFacingError).toBeNull(); - }); - }); - describe('retrieveProfileFromFile', function () { /** * Bypass all of Flow's checks, and mock out the file interface. diff --git a/src/test/unit/__snapshots__/profile-fetch.test.ts.snap b/src/test/unit/__snapshots__/profile-fetch.test.ts.snap new file mode 100644 index 0000000000..e1487c1549 --- /dev/null +++ b/src/test/unit/__snapshots__/profile-fetch.test.ts.snap @@ -0,0 +1,301 @@ +// Jest Snapshot v1, https://jestjs.io/docs/snapshot-testing + +exports[`fetchProfile fails if a bad profile JSON is passed in 1`] = `[Error: The profile’s JSON could not be decoded. 
The full error information has been printed out to the DevTool’s console.]`; + +exports[`fetchProfile fails if a bad profile JSON is passed in 2`] = ` +Array [ + Array [ + "The profile’s JSON could not be decoded.", + ], + Array [ + "JSON parsing error:", + [SyntaxError: Unexpected token 'i', "invalid" is not valid JSON], + ], + Array [ + "Fetch response:", + Response { + Symbol(state): Object { + "aborted": false, + "body": Object { + "length": 7, + "source": Uint8Array [], + "stream": ReadableStream { + Symbol(kType): "ReadableStream", + Symbol(kState): Object { + "controller": ReadableByteStreamController { + Symbol(kType): "ReadableByteStreamController", + Symbol(kState): Object { + "autoAllocateChunkSize": undefined, + "byobRequest": null, + "cancelAlgorithm": undefined, + "closeRequested": false, + "highWaterMark": 0, + "pendingPullIntos": Array [], + "pullAgain": false, + "pullAlgorithm": undefined, + "pulling": false, + "queue": Array [], + "queueTotalSize": 0, + "started": true, + "stream": [Circular], + }, + }, + "disturbed": true, + "reader": ReadableStreamDefaultReader { + Symbol(kType): "ReadableStreamDefaultReader", + Symbol(kState): Object { + "close": Object { + "promise": Promise {}, + "reject": [Function], + "resolve": [Function], + }, + "readRequests": Array [], + "stream": [Circular], + }, + }, + "state": "closed", + "storedError": undefined, + "transfer": Object { + "port1": undefined, + "port2": undefined, + "promise": undefined, + "writable": undefined, + }, + }, + Symbol(nodejs.webstream.isClosedPromise): Object { + "promise": Promise {}, + "reject": [Function], + "resolve": [Function], + }, + Symbol(nodejs.webstream.controllerErrorFunction): [Function], + }, + }, + "cacheState": "", + "headersList": HeadersList { + "cookies": null, + Symbol(headers map): Map { + "content-length" => Object { + "name": "content-length", + "value": "7", + }, + "content-type" => Object { + "name": "content-type", + "value": "application/json", + }, + }, + Symbol(headers map sorted): null, + }, + "rangeRequested": false, + "requestIncludesCredentials": false, + "status": 200, + "statusText": "OK", + "timingAllowPassed": false, + "timingInfo": null, + "type": "default", + "urlList": Array [], + }, + Symbol(headers): Headers {}, + }, + ], +] +`; + +exports[`fetchProfile fails if a bad profile JSON is passed in, with no content type 1`] = `[Error: The profile’s JSON could not be decoded. 
The full error information has been printed out to the DevTool’s console.]`; + +exports[`fetchProfile fails if a bad profile JSON is passed in, with no content type 2`] = ` +Array [ + Array [ + "The profile’s JSON could not be decoded.", + ], + Array [ + "JSON parsing error:", + [SyntaxError: Unexpected token 'i', "invalid" is not valid JSON], + ], + Array [ + "Fetch response:", + Response { + Symbol(state): Object { + "aborted": false, + "body": Object { + "length": 7, + "source": Uint8Array [], + "stream": ReadableStream { + Symbol(kType): "ReadableStream", + Symbol(kState): Object { + "controller": ReadableByteStreamController { + Symbol(kType): "ReadableByteStreamController", + Symbol(kState): Object { + "autoAllocateChunkSize": undefined, + "byobRequest": null, + "cancelAlgorithm": undefined, + "closeRequested": false, + "highWaterMark": 0, + "pendingPullIntos": Array [], + "pullAgain": false, + "pullAlgorithm": undefined, + "pulling": false, + "queue": Array [], + "queueTotalSize": 0, + "started": true, + "stream": [Circular], + }, + }, + "disturbed": true, + "reader": ReadableStreamDefaultReader { + Symbol(kType): "ReadableStreamDefaultReader", + Symbol(kState): Object { + "close": Object { + "promise": Promise {}, + "reject": [Function], + "resolve": [Function], + }, + "readRequests": Array [], + "stream": [Circular], + }, + }, + "state": "closed", + "storedError": undefined, + "transfer": Object { + "port1": undefined, + "port2": undefined, + "promise": undefined, + "writable": undefined, + }, + }, + Symbol(nodejs.webstream.isClosedPromise): Object { + "promise": Promise {}, + "reject": [Function], + "resolve": [Function], + }, + Symbol(nodejs.webstream.controllerErrorFunction): [Function], + }, + }, + "cacheState": "", + "headersList": HeadersList { + "cookies": null, + Symbol(headers map): Map { + "content-length" => Object { + "name": "content-length", + "value": "7", + }, + "content-type" => Object { + "name": "content-type", + "value": "undefined", + }, + }, + Symbol(headers map sorted): null, + }, + "rangeRequested": false, + "requestIncludesCredentials": false, + "status": 200, + "statusText": "OK", + "timingAllowPassed": false, + "timingInfo": null, + "type": "default", + "urlList": Array [], + }, + Symbol(headers): Headers {}, + }, + ], +] +`; + +exports[`fetchProfile fails if a bad zip file is passed in 1`] = `[Error: Unable to open the archive file. The full error information has been printed out to the DevTool’s console.]`; + +exports[`fetchProfile fails if a bad zip file is passed in 2`] = ` +Array [ + Array [ + "Unable to open the archive file.", + ], + Array [ + "Error:", + [Error: Can't find end of central directory : is this a zip file ? 
If it is, see https://stuk.github.io/jszip/documentation/howto/read_zip.html], + ], + Array [ + "Fetch response:", + Response { + Symbol(state): Object { + "aborted": false, + "body": Object { + "length": 4, + "source": Uint8Array [], + "stream": ReadableStream { + Symbol(kType): "ReadableStream", + Symbol(kState): Object { + "controller": ReadableByteStreamController { + Symbol(kType): "ReadableByteStreamController", + Symbol(kState): Object { + "autoAllocateChunkSize": undefined, + "byobRequest": null, + "cancelAlgorithm": undefined, + "closeRequested": false, + "highWaterMark": 0, + "pendingPullIntos": Array [], + "pullAgain": false, + "pullAlgorithm": undefined, + "pulling": false, + "queue": Array [], + "queueTotalSize": 0, + "started": true, + "stream": [Circular], + }, + }, + "disturbed": true, + "reader": ReadableStreamDefaultReader { + Symbol(kType): "ReadableStreamDefaultReader", + Symbol(kState): Object { + "close": Object { + "promise": Promise {}, + "reject": [Function], + "resolve": [Function], + }, + "readRequests": Array [], + "stream": [Circular], + }, + }, + "state": "closed", + "storedError": undefined, + "transfer": Object { + "port1": undefined, + "port2": undefined, + "promise": undefined, + "writable": undefined, + }, + }, + Symbol(nodejs.webstream.isClosedPromise): Object { + "promise": Promise {}, + "reject": [Function], + "resolve": [Function], + }, + Symbol(nodejs.webstream.controllerErrorFunction): [Function], + }, + }, + "cacheState": "", + "headersList": HeadersList { + "cookies": null, + Symbol(headers map): Map { + "content-length" => Object { + "name": "content-length", + "value": "4", + }, + "content-type" => Object { + "name": "content-type", + "value": "application/zip", + }, + }, + Symbol(headers map sorted): null, + }, + "rangeRequested": false, + "requestIncludesCredentials": false, + "status": 200, + "statusText": "OK", + "timingAllowPassed": false, + "timingInfo": null, + "type": "default", + "urlList": Array [], + }, + Symbol(headers): Headers {}, + }, + ], +] +`; diff --git a/src/test/unit/profile-fetch.test.ts b/src/test/unit/profile-fetch.test.ts new file mode 100644 index 0000000000..8fb0f6a40f --- /dev/null +++ b/src/test/unit/profile-fetch.test.ts @@ -0,0 +1,207 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +import JSZip from 'jszip'; + +import { fetchProfile } from '../../utils/profile-fetch'; +import { serializeProfile } from '../../profile-logic/process-profile'; +import { getProfileFromTextSamples } from '../fixtures/profiles/processed-profile'; + +import type { Profile } from 'firefox-profiler/types'; + +function encode(string: string): Uint8Array { + return new TextEncoder().encode(string); +} + +/** + * This profile will have a single sample, and a single thread. + */ +function _getSimpleProfile(): Profile { + return getProfileFromTextSamples('A').profile; +} + +/** + * fetchProfile has a decent amount of complexity around different issues with loading + * in different support URL formats. It's mainly testing what happens when JSON + * and zip file is sent, and what happens when things fail. + */ +describe('fetchProfile', function () { + /** + * This helper function encapsulates various configurations for the type of content + * as well and response headers. 
+ */ + async function configureFetch(obj: { + url: string; + contentType?: string; + content: 'generated-zip' | 'generated-json' | Uint8Array; + }) { + const { url, contentType, content } = obj; + const stringProfile = serializeProfile(_getSimpleProfile()); + const profile = JSON.parse(stringProfile); + let arrayBuffer; + + switch (content) { + case 'generated-zip': { + const zip = new JSZip(); + zip.file('profile.json', stringProfile); + arrayBuffer = await zip.generateAsync({ type: 'uint8array' }); + break; + } + case 'generated-json': + arrayBuffer = encode(stringProfile); + break; + default: + arrayBuffer = content; + break; + } + + window.fetchMock.catch(403).get(url, { + body: arrayBuffer, + headers: { + 'content-type': contentType, + }, + }); + + const reportError = jest.fn(); + const args = { + url, + onTemporaryError: () => {}, + reportError, + }; + + // Return fetch's args, based on the inputs. + return { profile, args, reportError }; + } + + it('fetches a normal profile with the correct content-type headers', async function () { + const { profile, args } = await configureFetch({ + url: 'https://example.com/profile.json', + contentType: 'application/json', + content: 'generated-json', + }); + + const profileOrZip = await fetchProfile(args); + expect(profileOrZip).toEqual({ responseType: 'PROFILE', profile }); + }); + + it('fetches a zipped profile with correct content-type headers', async function () { + const { args, reportError } = await configureFetch({ + url: 'https://example.com/profile.zip', + contentType: 'application/zip', + content: 'generated-zip', + }); + + const profileOrZip = await fetchProfile(args); + expect(profileOrZip.responseType).toBe('ZIP'); + expect(reportError.mock.calls.length).toBe(0); + }); + + it('fetches a zipped profile with incorrect content-type headers, but .zip extension', async function () { + const { args, reportError } = await configureFetch({ + url: 'https://example.com/profile.zip', + content: 'generated-zip', + }); + + const profileOrZip = await fetchProfile(args); + expect(profileOrZip.responseType).toBe('ZIP'); + expect(reportError.mock.calls.length).toBe(0); + }); + + it('fetches a profile with incorrect content-type headers, but .json extension', async function () { + const { profile, args, reportError } = await configureFetch({ + url: 'https://example.com/profile.json', + content: 'generated-json', + }); + + const profileOrZip = await fetchProfile(args); + expect(profileOrZip).toEqual({ responseType: 'PROFILE', profile }); + expect(reportError.mock.calls.length).toBe(0); + }); + + it('fetches a profile with incorrect content-type headers, no known extension, and attempts to JSON parse it it', async function () { + const { profile, args, reportError } = await configureFetch({ + url: 'https://example.com/profile.file', + content: 'generated-json', + }); + + const profileOrZip = await fetchProfile(args); + expect(profileOrZip).toEqual({ responseType: 'PROFILE', profile }); + expect(reportError.mock.calls.length).toBe(0); + }); + + it('fails if a bad zip file is passed in', async function () { + const { args, reportError } = await configureFetch({ + url: 'https://example.com/profile.file', + contentType: 'application/zip', + content: new Uint8Array([0, 1, 2, 3]), + }); + + let userFacingError; + try { + await fetchProfile(args); + } catch (error) { + userFacingError = error; + } + expect(userFacingError).toMatchSnapshot(); + expect(reportError.mock.calls.length).toBeGreaterThan(0); + expect(reportError.mock.calls).toMatchSnapshot(); + }); + 
+ it('fails if a bad profile JSON is passed in', async function () { + const invalidJSON = 'invalid'; + const { args, reportError } = await configureFetch({ + url: 'https://example.com/profile.json', + contentType: 'application/json', + content: encode(invalidJSON), + }); + + let userFacingError; + try { + await fetchProfile(args); + } catch (error) { + userFacingError = error; + } + expect(userFacingError).toMatchSnapshot(); + expect(reportError.mock.calls.length).toBeGreaterThan(0); + expect(reportError.mock.calls).toMatchSnapshot(); + }); + + it('fails if a bad profile JSON is passed in, with no content type', async function () { + const invalidJSON = 'invalid'; + const { args, reportError } = await configureFetch({ + url: 'https://example.com/profile.json', + content: encode(invalidJSON), + }); + + let userFacingError; + try { + await fetchProfile(args); + } catch (error) { + userFacingError = error; + } + expect(userFacingError).toMatchSnapshot(); + expect(reportError.mock.calls.length).toBeGreaterThan(0); + expect(reportError.mock.calls).toMatchSnapshot(); + }); + + it('fallback behavior if a completely unknown file is passed in', async function () { + const invalidJSON = 'invalid'; + const profile = encode(invalidJSON); + const { args } = await configureFetch({ + url: 'https://example.com/profile.unknown', + content: profile, + }); + + let userFacingError = null; + try { + const profileOrZip = await fetchProfile(args); + expect(profileOrZip).toEqual({ + responseType: 'PROFILE', + profile: profile.buffer, + }); + } catch (error) { + userFacingError = error; + } + expect(userFacingError).toBeNull(); + }); +}); diff --git a/src/utils/profile-fetch.ts b/src/utils/profile-fetch.ts new file mode 100644 index 0000000000..c9711f7adf --- /dev/null +++ b/src/utils/profile-fetch.ts @@ -0,0 +1,346 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { oneLine } from 'common-tags'; +import { assertExhaustiveCheck } from './types'; +import { TemporaryError } from './errors'; +import { decompress, isGzip } from './gz'; +import { isLocalURL } from './url'; +import { GOOGLE_STORAGE_BUCKET } from 'firefox-profiler/app-logic/constants'; +import type JSZip from 'jszip'; + +/** + * Shared utilities for fetching profiles from URLs. + * Used by both the web app (receive-profile.ts) and the CLI (profile-query). + * + * This module was extracted from receive-profile.ts to make the fetching + * logic reusable across different contexts (Redux vs CLI). + */ + +/** + * Convert a profile hash to its Google Cloud Storage URL. + * Public profiles are stored in Google Cloud Storage in the profile-store bucket. + * See https://cloud.google.com/storage/docs/access-public-data + */ +export function getProfileUrlForHash(hash: string): string { + return `https://storage.googleapis.com/${GOOGLE_STORAGE_BUCKET}/${hash}`; +} + +/** + * Extract the actual profile URL from a profiler.firefox.com URL. + * + * Parses URLs like: + * - https://profiler.firefox.com/from-url/http%3A%2F%2F127.0.0.1%3A3000%2Fprofile.json/ + * - https://profiler.firefox.com/public/g9w0fmjjx4bqrky4zg0wb90n65b8g3w0qjjx1t0/calltree/ + * + * Returns the decoded profile URL, or null if this is not a supported datasource. 
+ * This mimics the logic in retrieveProfileFromStore and retrieveProfileForRawUrl + * from receive-profile.ts + */ +export function extractProfileUrlFromProfilerUrl( + profilerUrl: string +): string | null { + try { + // Handle both full URLs and just pathnames + let pathname: string; + if ( + profilerUrl.startsWith('http://') || + profilerUrl.startsWith('https://') + ) { + const url = new URL(profilerUrl); + pathname = url.pathname; + } else { + pathname = profilerUrl; + } + + const pathParts = pathname.split('/').filter((d) => d); + + // Check if this is a from-url datasource + // URL structure: /from-url/{encoded-profile-url}/... + if (pathParts[0] === 'from-url' && pathParts[1]) { + return decodeURIComponent(pathParts[1]); + } + + // Check if this is a public datasource + // URL structure: /public/{hash}/... + // Profile is stored in Google Cloud Storage + if (pathParts[0] === 'public' && pathParts[1]) { + const hash = pathParts[1]; + return getProfileUrlForHash(hash); + } + + return null; + } catch (error) { + console.error('Failed to parse profiler URL:', error); + return null; + } +} + +function _wait(delayMs: number): Promise { + return new Promise((resolve) => setTimeout(resolve, delayMs)); +} + +/** + * Check if a load failure is likely due to Safari's localhost HTTP restriction. + * Safari blocks mixed content (HTTP on HTTPS page) even for localhost. + * This check works in both browser and Node.js (returns false in Node). + */ +function _loadProbablyFailedDueToSafariLocalhostHTTPRestriction( + url: string, + error: Error +): boolean { + // In Node.js, navigator won't exist + if ( + typeof navigator === 'undefined' || + !navigator.userAgent.match(/Safari\/\d+\.\d+/) + ) { + return false; + } + // Check if Safari considers this mixed content. + try { + const parsedUrl = new URL(url); + return ( + error.name === 'TypeError' && + parsedUrl.protocol === 'http:' && + isLocalURL(parsedUrl) && + typeof location !== 'undefined' && + location.protocol === 'https:' + ); + } catch { + return false; + } +} + +export class SafariLocalhostHTTPLoadError extends Error { + override name = 'SafariLocalhostHTTPLoadError'; +} + +/** + * Deduce the file type from a URL and content type. + * This is used to detect zip files vs profile files. + * Exported for use in receive-profile.ts for file handling. + */ +export function deduceContentType( + url: string, + contentType: string | null +): 'application/json' | 'application/zip' | null { + if (contentType === 'application/zip' || contentType === 'application/json') { + return contentType; + } + if (url.match(/\.zip$/)) { + return 'application/zip'; + } + if (url.match(/\.json/)) { + return 'application/json'; + } + return null; +} + +/** + * Parse JSON from an optionally gzipped array buffer. + * Exported for use in receive-profile.ts for direct file processing. + */ +export async function extractJsonFromArrayBuffer( + arrayBuffer: ArrayBuffer +): Promise { + let profileBytes = new Uint8Array(arrayBuffer); + // Check for the gzip magic number in the header. + if (isGzip(profileBytes)) { + profileBytes = await decompress(profileBytes); + } + + const textDecoder = new TextDecoder(); + return JSON.parse(textDecoder.decode(profileBytes)); +} + +/** + * Don't trust third party responses, try and handle a variety of responses gracefully. 
+ */ +async function _extractJsonFromResponse( + response: Response, + reportError: (...data: Array) => void, + fileType: 'application/json' | null +): Promise { + let arrayBuffer: ArrayBuffer | null = null; + try { + // await before returning so that we can catch JSON parse errors. + arrayBuffer = await response.arrayBuffer(); + return await extractJsonFromArrayBuffer(arrayBuffer); + } catch (error) { + // Change the error message depending on the circumstance: + let message; + if (error && typeof error === 'object' && error.name === 'AbortError') { + message = 'The network request to load the profile was aborted.'; + } else if (fileType === 'application/json') { + message = 'The profile’s JSON could not be decoded.'; + } else if (fileType === null && arrayBuffer !== null) { + // If the content type is not specified, use a raw array buffer + // to fallback to other supported profile formats. + return arrayBuffer; + } else { + message = oneLine` + The profile could not be downloaded and decoded. This does not look like a supported file + type. + `; + } + + // Provide helpful debugging information to the console. + reportError(message); + reportError('JSON parsing error:', error); + reportError('Fetch response:', response); + + throw new Error( + `${message} The full error information has been printed out to the DevTool’s console.` + ); + } +} + +/** + * Attempt to load a zip file from a third party. This process can fail, so make sure + * to handle and report the error if it does. + */ +async function _extractZipFromResponse( + response: Response, + reportError: (...data: Array) => void +): Promise { + const buffer = await response.arrayBuffer(); + // Workaround for https://github.com/Stuk/jszip/issues/941 + // When running this code in tests, `buffer` doesn't inherits from _this_ + // realm's ArrayBuffer object, and this breaks JSZip which doesn't account for + // this case. We workaround the issue by wrapping the buffer in an Uint8Array + // that comes from this realm. + const typedBuffer = new Uint8Array(buffer); + try { + const JSZip = await import('jszip'); + const zip = await JSZip.loadAsync(typedBuffer); + // Catch the error if unable to load the zip. + return zip; + } catch (error) { + const message = 'Unable to open the archive file.'; + reportError(message); + reportError('Error:', error); + reportError('Fetch response:', response); + throw new Error( + `${message} The full error information has been printed out to the DevTool’s console.` + ); + } +} + +export type ProfileOrZip = + | { responseType: 'PROFILE'; profile: unknown } + | { responseType: 'ZIP'; zip: JSZip }; + +/** + * This function guesses the correct content-type (even if one isn't sent) and then + * attempts to use the proper method to extract the response. + */ +async function _extractProfileOrZipFromResponse( + url: string, + response: Response, + reportError: (...data: Array) => void +): Promise { + const contentType = deduceContentType( + url, + response.headers.get('content-type') + ); + switch (contentType) { + case 'application/zip': + return { + responseType: 'ZIP', + zip: await _extractZipFromResponse(response, reportError), + }; + case 'application/json': + case null: + // The content type is null if it is unknown, or an unsupported type. Go ahead + // and try to process it as a profile. 
+ return { + responseType: 'PROFILE', + profile: await _extractJsonFromResponse( + response, + reportError, + contentType + ), + }; + default: + throw assertExhaustiveCheck(contentType); + } +} + +export type FetchProfileArgs = { + url: string; + onTemporaryError: (param: TemporaryError) => void; + // Allow tests to capture the reported error, but normally use console.error. + reportError?: (...data: Array) => void; +}; + +/** + * Tries to fetch a profile on `url`. If the profile is not found, + * `onTemporaryError` is called with an appropriate error, we wait 1 second, and + * then tries again. If we still can't find the profile after 11 tries, the + * returned promise is rejected with a fatal error. + * If we can retrieve the profile properly, the returned promise is resolved + * with the parsed profile or zip file. + * + * This function was moved from receive-profile.ts to make it reusable by + * both the web app and CLI. + */ +export async function fetchProfile( + args: FetchProfileArgs +): Promise { + const MAX_WAIT_SECONDS = 10; + let i = 0; + const { url, onTemporaryError } = args; + // Allow tests to capture the reported error, but normally use console.error. + const reportError = args.reportError || console.error; + + while (true) { + let response; + try { + response = await fetch(url); + } catch (e) { + // Case 1: Exception. + if ( + _loadProbablyFailedDueToSafariLocalhostHTTPRestriction(url, e as Error) + ) { + throw new SafariLocalhostHTTPLoadError(); + } + throw e; + } + + // Case 2: successful answer. + if (response.ok) { + return _extractProfileOrZipFromResponse(url, response, reportError); + } + + // case 3: unrecoverable error. + if (response.status !== 403) { + throw new Error(oneLine` + Could not fetch the profile on remote server. + Response was: ${response.status} ${response.statusText}. + `); + } + + // case 4: 403 errors can be transient while a profile is uploaded. + + if (i++ === MAX_WAIT_SECONDS) { + // In the last iteration we don't send a temporary error because we'll + // throw an error right after the while loop. + break; + } + + onTemporaryError( + new TemporaryError( + 'Profile not found on remote server.', + { count: i, total: MAX_WAIT_SECONDS + 1 } // 11 tries during 10 seconds + ) + ); + + await _wait(1000); + } + + throw new Error(oneLine` + Could not fetch the profile on remote server: + still not found after ${MAX_WAIT_SECONDS} seconds. + `); +} From 543c3258740a58790f09660cbdcb8d1106b04ba5 Mon Sep 17 00:00:00 2001 From: Markus Stange Date: Fri, 25 Jul 2025 21:14:45 -0400 Subject: [PATCH 6/7] Add utilities for summarizing CPU usage timelines in a textual form. --- src/profile-logic/combined-cpu.ts | 121 +++++++++++++ src/selectors/per-thread/thread.tsx | 14 ++ src/selectors/profile.ts | 96 +++++++++++ src/test/unit/activity-slice-tree.test.js | 39 +++++ src/test/unit/combined-cpu.test.ts | 92 ++++++++++ src/utils/slice-tree.ts | 199 ++++++++++++++++++++++ src/utils/window-console.ts | 10 ++ 7 files changed, 571 insertions(+) create mode 100644 src/profile-logic/combined-cpu.ts create mode 100644 src/test/unit/activity-slice-tree.test.js create mode 100644 src/test/unit/combined-cpu.test.ts create mode 100644 src/utils/slice-tree.ts diff --git a/src/profile-logic/combined-cpu.ts b/src/profile-logic/combined-cpu.ts new file mode 100644 index 0000000000..0ba89d513f --- /dev/null +++ b/src/profile-logic/combined-cpu.ts @@ -0,0 +1,121 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import type { SamplesTable } from 'firefox-profiler/types'; + +/** + * Represents CPU usage over time for a single thread. + */ +export type CpuRatioTimeSeries = { + time: number[]; + cpuRatio: Float64Array; + maxCpuRatio: number; + length: number; +}; + +/** + * Combines CPU usage data from multiple threads into a single timeline. + * + * This function takes CPU ratio data from multiple threads, each with potentially + * different sampling times, and creates a unified timeline where CPU ratios are + * summed. The result can exceed 1.0 when multiple threads are active simultaneously. + * + * The algorithm: + * 1. Maintains a cursor for each thread tracking the current sample index + * 2. Processes all sample times in ascending order (using a min-heap approach) + * 3. For each time point, sums CPU ratios from threads that are active at that time + * 4. A thread is considered active only between its first and last sample times + * + * Note: cpuRatio[i] represents CPU usage between time[i-1] and time[i], so we don't + * extend a thread's CPU usage beyond its last sample time. + * + * @param threadSamples - Array of SamplesTable objects, one per thread + * @returns Combined CPU data with unified time array and summed CPU ratios, + * or null if no threads have CPU data + */ +export function combineCPUDataFromThreads( + threadSamples: SamplesTable[] +): CpuRatioTimeSeries | null { + // Filter threads that have CPU ratio data. + // We require at least two samples per thread; the first sample's CPU ratio + // is meaningless. samples.threadCPURatio[1] is the CPU percentage between + // samples.time[0] and samples.time[1]. + const threadsWithCPU: CpuRatioTimeSeries[] = []; + for (const samples of threadSamples) { + if (samples.threadCPURatio && samples.time.length >= 2) { + threadsWithCPU.push({ + time: samples.time, + cpuRatio: samples.threadCPURatio, + maxCpuRatio: Infinity, + length: samples.length, + }); + } + } + + if (threadsWithCPU.length === 0) { + return null; + } + + // Initialize cursors for each thread + const cursors = new Array(threadsWithCPU.length).fill(0); + + // Merge all time points from all threads using a single pass + // Since each thread's times are already sorted, we can use a merge approach + const combinedTime: number[] = []; + const combinedCPURatio: number[] = []; + let combinedMaxCpuRatio = 0; + + // Find the earliest start time and latest end time across all threads + let earliestTime = Infinity; + for (const thread of threadsWithCPU) { + earliestTime = Math.min(earliestTime, thread.time[0]); + } + + // Process time points by repeatedly finding the next smallest time + while (true) { + // Find the next smallest time point among all threads + let sampleTime = Infinity; + for (let threadIdx = 0; threadIdx < threadsWithCPU.length; threadIdx++) { + const cursor = cursors[threadIdx]; + const thread = threadsWithCPU[threadIdx]; + if (cursor < thread.time.length) { + sampleTime = Math.min(sampleTime, thread.time[cursor]); + } + } + + if (sampleTime === Infinity) { + break; // No more time points + } + + // Advance cursors for all threads at this time point, and + // sum CPU ratios from all active threads at this time point + let sumCPURatio = 0; + for (let threadIdx = 0; threadIdx < threadsWithCPU.length; threadIdx++) { + const thread = threadsWithCPU[threadIdx]; + const cursor = cursors[threadIdx]; + if (cursor === thread.time.length) { + // This thread has 
already ended. + continue; + } + if (cursor > 0) { + sumCPURatio += thread.cpuRatio[cursor]; + } + if (thread.time[cursor] === sampleTime) { + cursors[threadIdx]++; + } + } + + // Add this time point + combinedTime.push(sampleTime); + combinedCPURatio.push(sumCPURatio); + combinedMaxCpuRatio = Math.max(combinedMaxCpuRatio, sumCPURatio); + } + + return { + time: combinedTime, + cpuRatio: Float64Array.from(combinedCPURatio), + maxCpuRatio: combinedMaxCpuRatio, + length: combinedTime.length, + }; +} diff --git a/src/selectors/per-thread/thread.tsx b/src/selectors/per-thread/thread.tsx index fc8b619b89..aa3dbbe94c 100644 --- a/src/selectors/per-thread/thread.tsx +++ b/src/selectors/per-thread/thread.tsx @@ -50,6 +50,8 @@ import type { MarkerSelectorsPerThread } from './markers'; import { mergeThreads } from '../../profile-logic/merge-compare'; import { defaultThreadViewOptions } from '../../reducers/profile-view'; +import type { SliceTree } from '../../utils/slice-tree'; +import { getSlices } from '../../utils/slice-tree'; /** * Infer the return type from the getBasicThreadSelectorsPerThread and @@ -100,6 +102,17 @@ export function getBasicThreadSelectorsPerThread( ProfileSelectors.getReferenceCPUDeltaPerMs, ProfileData.computeSamplesTableFromRawSamplesTable ); + const getActivitySlices: Selector = createSelector( + getSamplesTable, + (samples) => + samples.threadCPURatio + ? getSlices( + [0.05, 0.2, 0.4, 0.6, 0.8], + samples.threadCPURatio, + samples.time + ) + : null + ); const getNativeAllocations: Selector = ( state ) => getRawThread(state).nativeAllocations; @@ -386,6 +399,7 @@ export function getBasicThreadSelectorsPerThread( getRawThread, getThread, getSamplesTable, + getActivitySlices, getSamplesWeightType, getNativeAllocations, getJsAllocations, diff --git a/src/selectors/profile.ts b/src/selectors/profile.ts index 38d2e68e9e..a8d9d7cd95 100644 --- a/src/selectors/profile.ts +++ b/src/selectors/profile.ts @@ -4,7 +4,10 @@ import { createSelector } from 'reselect'; import * as Tracks from '../profile-logic/tracks'; import * as CPU from '../profile-logic/cpu'; +import * as CombinedCPU from '../profile-logic/combined-cpu'; import * as UrlState from './url-state'; +import type { SliceTree } from '../utils/slice-tree'; +import { getSlices } from '../utils/slice-tree'; import { ensureExists } from '../utils/types'; import { accumulateCounterSamples, @@ -14,6 +17,7 @@ import { processCounter, getInclusiveSampleIndexRangeForSelection, computeTabToThreadIndexesMap, + computeSamplesTableFromRawSamplesTable, } from '../profile-logic/profile-data'; import type { IPCMarkerCorrelations } from '../profile-logic/marker-data'; import { correlateIPCMarkers } from '../profile-logic/marker-data'; @@ -65,6 +69,7 @@ import type { MarkerSchema, MarkerSchemaByName, SampleUnits, + SamplesTable, IndexIntoSamplesTable, ExtraProfileInfoSection, TableViewOptions, @@ -678,6 +683,97 @@ export const getThreadActivityScores: Selector> = } ); +/** + * Get the CPU time in milliseconds for each thread. + * Returns an array of CPU times (one per thread), or null if no CPU delta + * information is available. This uses the raw sampleScore without boost factors. 
+ */
+export const getThreadCPUTimeMs: Selector<Array<number> | null> =
+  createSelector(getProfile, (profile) => {
+    const { threads, meta } = profile;
+    const { sampleUnits } = meta;
+
+    if (!sampleUnits || !sampleUnits.threadCPUDelta) {
+      return null;
+    }
+
+    // Determine the conversion factor to milliseconds
+    let cpuDeltaToMs: number;
+    switch (sampleUnits.threadCPUDelta) {
+      case 'µs':
+        cpuDeltaToMs = 1 / 1000;
+        break;
+      case 'ns':
+        cpuDeltaToMs = 1 / 1000000;
+        break;
+      case 'variable CPU cycles':
+        // CPU cycles are not time units, return null
+        return null;
+      default:
+        return null;
+    }
+
+    return threads.map((thread) => {
+      const { threadCPUDelta } = thread.samples;
+      if (!threadCPUDelta) {
+        return 0;
+      }
+      // Sum up all CPU deltas and convert to milliseconds
+      const totalCPUDelta = threadCPUDelta.reduce(
+        (accum, delta) => accum + (delta ?? 0),
+        0
+      );
+      return totalCPUDelta * cpuDeltaToMs;
+    });
+  });
+
+/**
+ * Get SamplesTable for all threads in the profile.
+ * Returns an array of SamplesTable objects, one per thread.
+ */
+export const getAllThreadsSamplesTables: Selector<SamplesTable[]> =
+  createSelector(
+    getProfile,
+    getSampleUnits,
+    getReferenceCPUDeltaPerMs,
+    (profile, sampleUnits, referenceCPUDeltaPerMs) => {
+      return profile.threads.map((thread) =>
+        computeSamplesTableFromRawSamplesTable(
+          thread.samples,
+          sampleUnits,
+          referenceCPUDeltaPerMs
+        )
+      );
+    }
+  );
+
+/**
+ * Get combined CPU activity data from all threads.
+ * Returns combined time and CPU ratio arrays, or null if no CPU data is available.
+ */
+export const getCombinedThreadCPUData: Selector<CombinedCPU.CpuRatioTimeSeries | null> =
+  createSelector(getAllThreadsSamplesTables, (samplesTables) =>
+    CombinedCPU.combineCPUDataFromThreads(samplesTables)
+  );
+
+/**
+ * Get activity slices for the combined CPU usage across all threads.
+ * Returns hierarchical slices showing periods of high combined CPU activity,
+ * or null if no CPU data is available.
+ */
+export const getCombinedThreadActivitySlices: Selector<SliceTree | null> =
+  createSelector(getCombinedThreadCPUData, (combinedCPU) => {
+    if (combinedCPU === null) {
+      return null;
+    }
+    const m = Math.ceil(combinedCPU.maxCpuRatio);
+    return getSlices(
+      [0.05 * m, 0.2 * m, 0.4 * m, 0.6 * m, 0.8 * m],
+      combinedCPU.cpuRatio,
+      combinedCPU.time
+    );
+  });
+
 /**
  * Get the pages array and construct a Map of pages that we can use to get the
  * relationships of tabs. The constructed map is `Map`.
diff --git a/src/test/unit/activity-slice-tree.test.js b/src/test/unit/activity-slice-tree.test.js
new file mode 100644
index 0000000000..031d5821aa
--- /dev/null
+++ b/src/test/unit/activity-slice-tree.test.js
@@ -0,0 +1,39 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/ + +// @flow + +import { getSlices, printSliceTree } from '../../utils/slice-tree'; + +function getSlicesEasy(threadCPUPercentage: number[]): string[] { + const time = threadCPUPercentage.map((_, i) => i); + const threadCPURatio = new Float64Array( + threadCPUPercentage.map((p) => p / 100) + ); + const slices = getSlices([0.05, 0.2, 0.4, 0.6, 0.8], threadCPURatio, time); + return printSliceTree(slices, time); +} + +describe('Activity slice tree', function () { + it('allocates the right amount of slots', function () { + expect(getSlicesEasy([0, 0, 6, 0, 0, 0])).toEqual([ + '- 6% for 1.0ms (1 samples): 1.0ms - 2.0ms', + ]); + expect(getSlicesEasy([0, 0, 100, 0, 100, 0, 100, 0, 0, 0])).toEqual([ + '- 60% for 5.0ms (5 samples): 1.0ms - 6.0ms', + ' - 100% for 1.0ms (1 samples): 1.0ms - 2.0ms', + ' - 100% for 1.0ms (1 samples): 3.0ms - 4.0ms', + ' - 100% for 1.0ms (1 samples): 5.0ms - 6.0ms', + ]); + expect( + getSlicesEasy([ + 0, 0, 6, 0, 0, 0, 0, 34, 86, 34, 0, 0, 0, 0, 12, 9, 0, 0, 0, 7, 0, + ]) + ).toEqual([ + '- 10% for 18.0ms (18 samples): 1.0ms - 19.0ms', + ' - 51% for 3.0ms (3 samples): 6.0ms - 9.0ms', + ' - 86% for 1.0ms (1 samples): 7.0ms - 8.0ms', + ]); + }); +}); diff --git a/src/test/unit/combined-cpu.test.ts b/src/test/unit/combined-cpu.test.ts new file mode 100644 index 0000000000..7ddb4c8b1c --- /dev/null +++ b/src/test/unit/combined-cpu.test.ts @@ -0,0 +1,92 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { combineCPUDataFromThreads } from 'firefox-profiler/profile-logic/combined-cpu'; +import type { SamplesTable } from 'firefox-profiler/types'; + +function createSamplesTable(time: number[], cpuRatio: number[]): SamplesTable { + return { + time, + threadCPURatio: Float64Array.from(cpuRatio), + // Other required fields (stubbed for test purposes) + stack: new Array(time.length).fill(null), + length: time.length, + weight: null, + weightType: 'samples', + }; +} + +describe('combineCPUDataFromThreads', function () { + it('returns null when given empty array', function () { + const result = combineCPUDataFromThreads([]); + expect(result).toBeNull(); + }); + + it('returns single thread data unchanged for one thread', function () { + const samples = [createSamplesTable([0, 100, 200], [0.0, 0.5, 0.8])]; + + const result = combineCPUDataFromThreads(samples); + + expect(result).not.toBeNull(); + expect(result!.time).toEqual([0, 100, 200]); + expect(Array.from(result!.cpuRatio)).toEqual([0.0, 0.5, 0.8]); + }); + + it('combines two threads with same sample times', function () { + const samples = [ + createSamplesTable([0, 100, 200], [0, 0.5, 0.3]), + createSamplesTable([0, 100, 200], [0, 0.4, 0.5]), + ]; + + const result = combineCPUDataFromThreads(samples); + + expect(result).not.toBeNull(); + expect(result!.time).toEqual([0, 100, 200]); + expect(Array.from(result!.cpuRatio)).toEqual([0, 0.9, 0.8]); + }); + + it('combines threads with different sample times', function () { + const samples = [ + createSamplesTable([0, 100, 200], [0.0, 0.5, 0.8]), + createSamplesTable([50, 150, 250], [0.0, 0.3, 0.4]), + ]; + + const result = combineCPUDataFromThreads(samples); + + expect(result).not.toBeNull(); + // Should have all unique time points + expect(result!.time).toEqual([0, 50, 100, 150, 200, 250]); + + // 0: thread1=bef, thread2=bef → 0.0 + // 0- 50: thread1=0.5, thread2=bef → 0.5 + // 50-100: thread1=0.5, thread2=0.3 → 0.8 
+ // 100-150: thread1=0.8, thread2=0.3 → 1.1 + // 150-200: thread1=0.8, thread2=0.4 → 1.2 + // 200-250: thread1=end, thread2=0.4 → 0.4 + const expected = [0.0, 0.5, 0.8, 1.1, 1.2, 0.4]; + const actual = Array.from(result!.cpuRatio); + expect(actual.length).toBe(expected.length); + for (let i = 0; i < expected.length; i++) { + expect(actual[i]).toBeCloseTo(expected[i], 10); + } + }); + + it('handles threads with non-overlapping time ranges', function () { + const samples = [ + createSamplesTable([0, 10, 20], [0.0, 0.3, 0.5]), + createSamplesTable([30, 40, 50], [0.0, 0.4, 0.6]), + ]; + + const result = combineCPUDataFromThreads(samples); + + expect(result).not.toBeNull(); + expect(result!.time).toEqual([0, 10, 20, 30, 40, 50]); + + // At times 0, 10, 20: only thread1 has samples + // At times 30, 40, 50: thread1 has ended (30 > 20), only thread2 contributes + expect(Array.from(result!.cpuRatio)).toEqual([ + 0.0, 0.3, 0.5, 0.0, 0.4, 0.6, + ]); + }); +}); diff --git a/src/utils/slice-tree.ts b/src/utils/slice-tree.ts new file mode 100644 index 0000000000..86bbb27b52 --- /dev/null +++ b/src/utils/slice-tree.ts @@ -0,0 +1,199 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +export type Slice = { + start: number; + end: number; + avg: number; + sum: number; + parent: number | null; +}; + +function addIndexIntervalsExceedingThreshold( + threshold: number, + threadCPURatio: Float64Array, + time: number[], + items: Slice[], + parent: number | null, + startIndex: number = 0, + endIndex: number = threadCPURatio.length - 1 +) { + let currentStartIndex = startIndex; + while (true) { + let currentEndIndex = endIndex; + while ( + currentStartIndex < currentEndIndex && + threadCPURatio[currentStartIndex + 1] < threshold + ) { + currentStartIndex++; + } + + while ( + currentStartIndex < currentEndIndex && + threadCPURatio[currentEndIndex] < threshold + ) { + currentEndIndex--; + } + + if (currentStartIndex === currentEndIndex) { + break; + } + + const startTime = time[currentStartIndex]; + let sum = 0; + let lastEndIndexWithAvgExceedingThreshold = currentStartIndex + 1; + let lastEndIndexWithAvgExceedingThresholdAvg = threshold; + let lastEndIndexWithAvgExceedingThresholdSum = 0; + let timeBefore = startTime; + for (let i = currentStartIndex + 1; i <= currentEndIndex; i++) { + const timeAfter = time[i]; + const timeDelta = timeAfter - timeBefore; + sum += threadCPURatio[i] * timeDelta; + if (timeAfter > startTime) { + const avg = sum / (timeAfter - startTime); + if (avg >= threshold) { + lastEndIndexWithAvgExceedingThreshold = i; + lastEndIndexWithAvgExceedingThresholdAvg = avg; + lastEndIndexWithAvgExceedingThresholdSum = sum; + } + } + timeBefore = timeAfter; + } + + // assert(currentStartIndex < lastEndIndexWithAvgExceedingThreshold); + items.push({ + start: currentStartIndex, + end: lastEndIndexWithAvgExceedingThreshold, + avg: lastEndIndexWithAvgExceedingThresholdAvg, + sum: lastEndIndexWithAvgExceedingThresholdSum, + parent, + }); + currentStartIndex = lastEndIndexWithAvgExceedingThreshold; + } +} + +export type SliceTree = { + slices: Slice[]; + time: number[]; +}; + +export function getSlices( + thresholds: number[], + threadCPURatio: Float64Array, + time: number[], + startIndex: number = 0, + endIndex: number = threadCPURatio.length - 1 +): SliceTree { + const firstThreshold = thresholds[0]; + const slices = new Array(); + 
addIndexIntervalsExceedingThreshold( + firstThreshold, + threadCPURatio, + time, + slices, + null, + startIndex, + endIndex + ); + for (let i = 0; i < slices.length; i++) { + const slice = slices[i]; + const nextThreshold = thresholds.find((thresh) => thresh > slice.avg); + if (nextThreshold === undefined) { + continue; + } + addIndexIntervalsExceedingThreshold( + nextThreshold, + threadCPURatio, + time, + slices, + i, + slice.start, + slice.end + ); + } + return { slices, time }; +} + +function sliceToString(slice: Slice, time: number[]): string { + const { avg, start, end } = slice; + const startTime = time[start]; + const endTime = time[end]; + const duration = endTime - startTime; + const sampleCount = end - start; + return `${Math.round(avg * 100)}% for ${duration.toFixed(1)}ms (${sampleCount} samples): ${startTime.toFixed(1)}ms - ${endTime.toFixed(1)}ms`; +} + +function appendSliceSubtree( + slices: Slice[], + startIndex: number, + parent: number | null, + childrenStartPerParent: number[], + interestingSliceIndexes: Set, + nestingDepth: number, + time: number[], + s: string[] +) { + for (let i = startIndex; i < slices.length; i++) { + if (!interestingSliceIndexes.has(i)) { + continue; + } + + const slice = slices[i]; + if (slice.parent !== parent) { + break; + } + + s.push(' '.repeat(nestingDepth) + '- ' + sliceToString(slice, time)); + + const childrenStart = childrenStartPerParent[i]; + if (childrenStart !== null) { + appendSliceSubtree( + slices, + childrenStart, + i, + childrenStartPerParent, + interestingSliceIndexes, + nestingDepth + 1, + time, + s + ); + } + } +} + +export function printSliceTree({ slices, time }: SliceTree): string[] { + if (slices.length === 0) { + return ['No significant activity.']; + } + + const childrenStartPerParent = new Array(slices.length); + const indexAndSumPerSlice = new Array(slices.length); + for (let i = 0; i < slices.length; i++) { + childrenStartPerParent[i] = null; + const { parent, sum } = slices[i]; + indexAndSumPerSlice.push({ i, sum }); + if (parent !== null && childrenStartPerParent[parent] === null) { + childrenStartPerParent[parent] = i; + } + } + indexAndSumPerSlice.sort((a, b) => b.sum - a.sum); + const interestingSliceIndexes = new Set( + indexAndSumPerSlice.slice(0, 20).map((x) => x.i) + ); + // console.log(interestingSliceIndexes); + + const s = new Array(); + appendSliceSubtree( + slices, + 0, + null, + childrenStartPerParent, + interestingSliceIndexes, + 0, + time, + s + ); + + return s; +} diff --git a/src/utils/window-console.ts b/src/utils/window-console.ts index 09a49f2691..aa47e10bca 100644 --- a/src/utils/window-console.ts +++ b/src/utils/window-console.ts @@ -16,6 +16,7 @@ import { shortenUrl } from 'firefox-profiler/utils/shorten-url'; import { createBrowserConnection } from 'firefox-profiler/app-logic/browser-connection'; import { formatTimestamp } from 'firefox-profiler/utils/format-numbers'; import { togglePseudoStrategy } from 'firefox-profiler/components/app/AppLocalizationProvider'; +import { printSliceTree } from 'firefox-profiler/utils/slice-tree'; import type { CallTree } from 'firefox-profiler/profile-logic/call-tree'; // Despite providing a good libdef for Object.defineProperty, Flow still @@ -46,6 +47,7 @@ export type ExtraPropertiesOnWindowForConsole = { ) => Promise; extractGeckoLogs: () => string; totalMarkerDuration: (markers: any) => number; + activity: () => void; shortenUrl: typeof shortenUrl; getState: GetState; selectors: typeof selectorsForConsole; @@ -330,6 +332,14 @@ export function 
addDataToWindowObject( return totalDuration; }; + target.activity = function () { + const slices = + selectorsForConsole.selectedThread.getActivitySlices(getState()); + if (slices) { + console.log(printSliceTree(slices).join('\n')); + } + }; + target.shortenUrl = shortenUrl; target.getState = getState; target.selectors = selectorsForConsole; From 5b174ca222843fe68396029c2885ff2b28418699 Mon Sep 17 00:00:00 2001 From: Markus Stange Date: Fri, 25 Jul 2025 21:14:45 -0400 Subject: [PATCH 7/7] Create a profile-query library and a profile-query-cli script. --- .gitignore | 1 + function-name-truncation-proposal.md | 481 ++++++ jest.config.js | 23 + json-output-plan.md | 640 +++++++ json-output-status.md | 248 +++ marker-support-plan.md | 816 +++++++++ package.json | 8 +- pq-case-study-2.md | 527 ++++++ pq-case-study.md | 718 ++++++++ pq-filters-and-bookmarks-proposal.md | 750 ++++++++ pq-todo.md | 488 ++++++ src/profile-logic/call-tree.ts | 4 + src/profile-logic/combined-cpu.ts | 32 +- src/profile-query-cli/.npmignore | 32 + src/profile-query-cli/README.md | 300 ++++ src/profile-query-cli/client.ts | 287 ++++ src/profile-query-cli/constants.ts | 12 + src/profile-query-cli/daemon.ts | 412 +++++ src/profile-query-cli/formatters.ts | 859 ++++++++++ src/profile-query-cli/index.ts | 664 ++++++++ src/profile-query-cli/package.json | 35 + src/profile-query-cli/protocol.ts | 128 ++ src/profile-query-cli/session.ts | 202 +++ src/profile-query-cli/tests/basic.test.ts | 95 ++ .../tests/daemon-startup.test.ts | 116 ++ src/profile-query-cli/tests/sessions.test.ts | 117 ++ src/profile-query-cli/tests/setup.ts | 7 + src/profile-query-cli/tests/utils.ts | 161 ++ src/profile-query-cli/webpack.config.js | 59 + src/profile-query/README.md | 58 + src/profile-query/cpu-activity.ts | 208 +++ src/profile-query/formatters/call-tree.ts | 340 ++++ src/profile-query/formatters/marker-info.ts | 1516 +++++++++++++++++ src/profile-query/formatters/profile-info.ts | 240 +++ src/profile-query/formatters/thread-info.ts | 492 ++++++ src/profile-query/function-list.ts | 525 ++++++ src/profile-query/function-map.ts | 69 + src/profile-query/index.ts | 679 ++++++++ src/profile-query/loader.ts | 131 ++ src/profile-query/marker-map.ts | 72 + src/profile-query/process-thread-list.ts | 178 ++ src/profile-query/thread-map.ts | 47 + src/profile-query/time-range-parser.ts | 63 + src/profile-query/timestamps.ts | 312 ++++ src/profile-query/types.ts | 428 +++++ src/profile-query/webpack.config.js | 41 + src/selectors/per-thread/thread.tsx | 17 + src/selectors/profile.ts | 33 + .../call-tree-formatting.test.ts.snap | 318 ++++ .../call-tree-formatting.test.ts | 616 +++++++ .../unit/profile-query-cli/client.test.ts | 28 + .../unit/profile-query-cli/daemon.test.ts | 28 + .../unit/profile-query-cli/session.test.ts | 355 ++++ .../unit/profile-query-marker-utils.test.ts | 208 +++ src/test/unit/profile-query/call-tree.test.ts | 589 +++++++ .../unit/profile-query/function-list.test.ts | 608 +++++++ .../profile-query/process-thread-list.test.ts | 379 +++++ .../profile-query/profile-querier.test.ts | 210 +++ .../profile-query/time-range-parser.test.ts | 145 ++ .../unit/profile-query/timestamps.test.ts | 133 ++ 60 files changed, 17282 insertions(+), 6 deletions(-) create mode 100644 function-name-truncation-proposal.md create mode 100644 json-output-plan.md create mode 100644 json-output-status.md create mode 100644 marker-support-plan.md create mode 100644 pq-case-study-2.md create mode 100644 pq-case-study.md create mode 100644 
pq-filters-and-bookmarks-proposal.md create mode 100644 pq-todo.md create mode 100644 src/profile-query-cli/.npmignore create mode 100644 src/profile-query-cli/README.md create mode 100644 src/profile-query-cli/client.ts create mode 100644 src/profile-query-cli/constants.ts create mode 100644 src/profile-query-cli/daemon.ts create mode 100644 src/profile-query-cli/formatters.ts create mode 100644 src/profile-query-cli/index.ts create mode 100644 src/profile-query-cli/package.json create mode 100644 src/profile-query-cli/protocol.ts create mode 100644 src/profile-query-cli/session.ts create mode 100644 src/profile-query-cli/tests/basic.test.ts create mode 100644 src/profile-query-cli/tests/daemon-startup.test.ts create mode 100644 src/profile-query-cli/tests/sessions.test.ts create mode 100644 src/profile-query-cli/tests/setup.ts create mode 100644 src/profile-query-cli/tests/utils.ts create mode 100644 src/profile-query-cli/webpack.config.js create mode 100644 src/profile-query/README.md create mode 100644 src/profile-query/cpu-activity.ts create mode 100644 src/profile-query/formatters/call-tree.ts create mode 100644 src/profile-query/formatters/marker-info.ts create mode 100644 src/profile-query/formatters/profile-info.ts create mode 100644 src/profile-query/formatters/thread-info.ts create mode 100644 src/profile-query/function-list.ts create mode 100644 src/profile-query/function-map.ts create mode 100644 src/profile-query/index.ts create mode 100644 src/profile-query/loader.ts create mode 100644 src/profile-query/marker-map.ts create mode 100644 src/profile-query/process-thread-list.ts create mode 100644 src/profile-query/thread-map.ts create mode 100644 src/profile-query/time-range-parser.ts create mode 100644 src/profile-query/timestamps.ts create mode 100644 src/profile-query/types.ts create mode 100644 src/profile-query/webpack.config.js create mode 100644 src/test/unit/profile-query-cli/__snapshots__/call-tree-formatting.test.ts.snap create mode 100644 src/test/unit/profile-query-cli/call-tree-formatting.test.ts create mode 100644 src/test/unit/profile-query-cli/client.test.ts create mode 100644 src/test/unit/profile-query-cli/daemon.test.ts create mode 100644 src/test/unit/profile-query-cli/session.test.ts create mode 100644 src/test/unit/profile-query-marker-utils.test.ts create mode 100644 src/test/unit/profile-query/call-tree.test.ts create mode 100644 src/test/unit/profile-query/function-list.test.ts create mode 100644 src/test/unit/profile-query/process-thread-list.test.ts create mode 100644 src/test/unit/profile-query/profile-querier.test.ts create mode 100644 src/test/unit/profile-query/time-range-parser.test.ts create mode 100644 src/test/unit/profile-query/timestamps.test.ts diff --git a/.gitignore b/.gitignore index 14f36789cc..19f3f00b5c 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ coverage webpack.local-config.js *.orig *.rej +.pq-dev diff --git a/function-name-truncation-proposal.md b/function-name-truncation-proposal.md new file mode 100644 index 0000000000..889438ab35 --- /dev/null +++ b/function-name-truncation-proposal.md @@ -0,0 +1,481 @@ +# Smart Function Name Truncation Proposal + +## Problem Statement + +Current truncation in pq cuts function names at arbitrary character positions, breaking in the middle of words and losing critical information: + +``` +Bad: mozilla::interceptor::FuncHook>::operator() +Good: std::_Hash>::~_Hash() +Good: mozilla::wr::RenderThread::UpdateAndRender(...) +``` + +## Key Insights + +1. 
**Function name is at the end** - Method names, function names, operators appear after all namespace/template noise +2. **Middle is noise** - Template parameters and nested namespaces in the middle can be elided +3. **Proper nesting matters** - Can't break inside `<...>` or `(...)` without understanding the structure +4. **Uniform limit works** - With smart truncation, a single high limit (e.g., 120 chars) works everywhere + +## Algorithm Overview + +### Step 1: Parse the Function Name Structure + +Parse the function name into tokens, tracking nesting depth: + +```typescript +type Token = { + text: string; + type: 'text' | 'open' | 'close'; + depth: number; // Nesting depth at this point +}; +``` + +**Parsing rules:** + +- `<` and `(` are "open" tokens, increase depth +- `>` and `)` are "close" tokens, decrease depth +- Everything else is "text" +- Split "text" at `::` boundaries for namespace resolution + +**Example:** + +``` +Input: std::vector>::iterator + +Tokens: + { text: "std", type: "text", depth: 0 } + { text: "::", type: "text", depth: 0 } + { text: "vector", type: "text", depth: 0 } + { text: "<", type: "open", depth: 0 } // depth becomes 1 + { text: "std::pair", type: "text", depth: 1 } + { text: "<", type: "open", depth: 1 } // depth becomes 2 + { text: "int,std::string", type: "text", depth: 2 } + { text: ">", type: "close", depth: 2 } // depth becomes 1 + { text: ">", type: "close", depth: 1 } // depth becomes 0 + { text: "::", type: "text", depth: 0 } + { text: "iterator", type: "text", depth: 0 } +``` + +### Step 2: Identify Prefix and Suffix Regions + +**Prefix:** Everything up to the last top-level (depth 0) `::` or opening bracket +**Suffix:** Function name + parameters + template suffix + +Examples: + +``` +mozilla::wr::RenderThread::UpdateAndRender(mozilla::wr::WrWindowId) +^------ prefix -------^ ^--------- suffix ---------^ + +std::_Hash>::~_Hash() +^-- prefix --^ ^- suffix -^ +``` + +### Step 3: Truncate Intelligently + +If `prefix.length + suffix.length + 3 <= maxLength`: + +- Return full name (no truncation needed) + +Else: + +- Calculate available space: `available = maxLength - 3` (for "...") +- Allocate to suffix: `suffixLen = min(suffix.length, available * 0.4)` (40% of space) +- Allocate to prefix: `prefixLen = available - suffixLen` +- Truncate prefix at **top-level namespace boundary** (depth 0, at `::`) +- Truncate suffix from start, preserving **complete parameter lists and template args** +- Return `prefix + "..." + suffix` + +### Step 4: Handle Edge Cases + +**Very long suffix (function name itself is huge):** + +``` +someFunctionWithRidiculouslyLongNameThatKeepsGoingForever() +``` + +- Still preserve `(...)` or `()` at the end +- Truncate the name itself if needed: `someFunctionWithRidicu...KeepsGoingForever()` + +**Nested templates exceeding available space:** + +``` +std::vector>>> +``` + +- Preserve outer structure: `std::vector>` +- Replace entire inner nesting with `...` + +**No namespaces (C functions, simple names):** + +``` +RtlUserThreadStart +malloc +``` + +- No prefix to preserve +- Return full name if it fits +- Simple truncation from start if it doesn't: `RtlUserThread...` + +## Implementation + +### Core Function Signature + +```typescript +/** + * Intelligently truncate a function name, preserving context and function name. 
+ * + * @param functionName - The function name to truncate (without library prefix) + * @param maxLength - Maximum length for truncated output + * @returns Truncated function name, or original if it fits + */ +export function truncateFunctionName( + functionName: string, + maxLength: number +): string; +``` + +**Note:** Library prefix (`nvoglv64.dll!`) is added AFTER truncation by the caller. This function only handles the function name itself. + +### Parsing Implementation + +```typescript +type Token = { + text: string; + type: 'text' | 'open' | 'close'; + nestingDepth: number; +}; + +function tokenizeFunctionName(name: string): Token[] { + const tokens: Token[] = []; + let depth = 0; + let currentText = ''; + + for (let i = 0; i < name.length; i++) { + const char = name[i]; + + if (char === '<' || char === '(') { + // Flush any accumulated text + if (currentText) { + tokens.push({ text: currentText, type: 'text', nestingDepth: depth }); + currentText = ''; + } + tokens.push({ text: char, type: 'open', nestingDepth: depth }); + depth++; + } else if (char === '>' || char === ')') { + // Flush any accumulated text + if (currentText) { + tokens.push({ text: currentText, type: 'text', nestingDepth: depth }); + currentText = ''; + } + depth--; + tokens.push({ text: char, type: 'close', nestingDepth: depth }); + } else { + currentText += char; + } + } + + // Flush remaining text + if (currentText) { + tokens.push({ text: currentText, type: 'text', nestingDepth: depth }); + } + + return tokens; +} +``` + +### Truncation Implementation + +```typescript +function truncateFunctionName(functionName: string, maxLength: number): string { + if (functionName.length <= maxLength) { + return functionName; + } + + const tokens = tokenizeFunctionName(functionName); + + // Find the last top-level namespace separator (depth 0, "::") + let lastTopLevelSeparatorIndex = -1; + for (let i = tokens.length - 1; i >= 0; i--) { + if ( + tokens[i].nestingDepth === 0 && + tokens[i].type === 'text' && + tokens[i].text.includes('::') + ) { + // Find the last :: in this token + const lastColons = tokens[i].text.lastIndexOf('::'); + if (lastColons !== -1) { + lastTopLevelSeparatorIndex = i; + break; + } + } + } + + // Split into prefix and suffix + let prefixTokens: Token[]; + let suffixTokens: Token[]; + + if (lastTopLevelSeparatorIndex !== -1) { + // Split at the namespace separator + const sepToken = tokens[lastTopLevelSeparatorIndex]; + const lastColons = sepToken.text.lastIndexOf('::'); + + prefixTokens = tokens.slice(0, lastTopLevelSeparatorIndex); + prefixTokens.push({ + text: sepToken.text.substring(0, lastColons + 2), + type: 'text', + nestingDepth: 0, + }); + + const remainingText = sepToken.text.substring(lastColons + 2); + suffixTokens = []; + if (remainingText) { + suffixTokens.push({ text: remainingText, type: 'text', nestingDepth: 0 }); + } + suffixTokens.push(...tokens.slice(lastTopLevelSeparatorIndex + 1)); + } else { + // No namespace separator, everything is suffix + prefixTokens = []; + suffixTokens = tokens; + } + + const prefix = tokensToString(prefixTokens); + const suffix = tokensToString(suffixTokens); + + const ellipsis = '...'; + const available = maxLength - ellipsis.length; + + // Allocate space: 40% to suffix (the function name), 60% to prefix (context) + const suffixLength = Math.min(suffix.length, Math.floor(available * 0.4)); + const prefixLength = available - suffixLength; + + // Truncate prefix at top-level namespace boundaries + const truncatedPrefix = truncatePrefix(prefixTokens, 
prefixLength); + + // Truncate suffix, preserving structure + const truncatedSuffix = truncateSuffix(suffixTokens, suffixLength); + + return truncatedPrefix + ellipsis + truncatedSuffix; +} + +function tokensToString(tokens: Token[]): string { + return tokens.map((t) => t.text).join(''); +} + +function truncatePrefix(tokens: Token[], maxLength: number): string { + // Build prefix up to maxLength, preferring to break at namespace boundaries (::) + let result = ''; + + for (const token of tokens) { + if (token.nestingDepth > 0) { + // Inside template or params, skip entire nested section if it doesn't fit + const remaining = tokensToString(tokens.slice(tokens.indexOf(token))); + if (result.length + remaining.length <= maxLength) { + result += token.text; + } else { + // Can't fit, stop here + break; + } + } else { + // Top level text + if (result.length + token.text.length <= maxLength) { + result += token.text; + } else { + // Try to fit partial token, breaking at :: + const parts = token.text.split('::'); + for (let i = 0; i < parts.length; i++) { + const part = parts[i] + (i < parts.length - 1 ? '::' : ''); + if (result.length + part.length <= maxLength) { + result += part; + } else { + break; + } + } + break; + } + } + } + + return result; +} + +function truncateSuffix(tokens: Token[], maxLength: number): string { + // Take from the end, preserving complete structures + let result = ''; + let depth = 0; + + for (let i = tokens.length - 1; i >= 0; i--) { + const token = tokens[i]; + + if (token.type === 'close') { + depth++; + result = token.text + result; + } else if (token.type === 'open') { + depth--; + result = token.text + result; + } else { + // Text token + if (depth > 0 || result.length + token.text.length <= maxLength) { + // Either we're inside nested structure (must include), or it fits + result = token.text + result; + } else { + // Try to fit partial text from end + const availableSpace = maxLength - result.length; + if (availableSpace > 0) { + const truncatedText = token.text.substring( + token.text.length - availableSpace + ); + result = truncatedText + result; + } + break; + } + } + + if (result.length >= maxLength && depth === 0) { + break; + } + } + + return result; +} +``` + +## Examples + +### Example 1: C++ Template-Heavy Function + +**Input:** + +``` +std::_Hash,std::equal_to>,std::allocator>,0>>::~_Hash() +``` + +**Tokens (simplified):** + +``` +"std::_Hash" (depth 0) +"<" (depth 0→1) +"std::_Umap_traits" (depth 1) +"<" (depth 1→2) +... 
(depth 2, nested templates) +">" (depth 2→1) +">" (depth 1→0) +"::" (depth 0) +"~_Hash()" (depth 0) +``` + +**Truncated (120 chars):** + +``` +std::_Hash>::~_Hash() +``` + +### Example 2: Mozilla Namespace-Heavy Function + +**Input:** + +``` +mozilla::interceptor::FuncHook>::operator() +``` + +**Truncated (120 chars):** + +``` +mozilla::interceptor::FuncHook>::operator() +``` + +### Example 3: WebRender Function with Params + +**Input:** + +``` +mozilla::wr::RenderThread::UpdateAndRender(mozilla::wr::WrWindowId, mozilla::layers::BaseTransactionId) +``` + +**Truncated (120 chars):** + +``` +mozilla::wr::RenderThread::UpdateAndRender(mozilla::wr::WrWindowId, mozilla::layers::BaseTransactionId<...>) +``` + +### Example 4: Short Function (No Truncation) + +**Input:** + +``` +RtlUserThreadStart +``` + +**Truncated (120 chars):** + +``` +RtlUserThreadStart +``` + +### Example 5: Very Long Function Name + +**Input:** + +``` +someRidiculouslyLongFunctionNameThatJustKeepsGoingAndGoingWithoutAnyTemplatesOrNamespaces() +``` + +**Truncated (120 chars):** + +``` +someRidiculouslyLongFunctionNameThatJustKeepsGoingAndGoingWi...sOrNamespaces() +``` + +## Benefits + +1. **Readability**: Can always see what function you're looking at +2. **Context**: Namespace/class information is preserved +3. **No mid-word breaks**: Respects C++ syntax structure +4. **Uniform limit**: One limit (120 chars) works everywhere +5. **Graceful degradation**: Falls back to simple truncation when structure is unclear + +## Implementation Plan + +1. **Phase 1:** Implement tokenizer and basic truncation +2. **Phase 2:** Add smart prefix/suffix selection +3. **Phase 3:** Handle edge cases (nested templates, very long names) +4. **Phase 4:** Add tests with real-world function names from profiles +5. **Phase 5:** Update all call sites to use new truncation + +## Recommended Limits + +With smart truncation, we can use **higher, uniform limits**: + +- **120 characters** everywhere (function lists, call trees, heaviest stack) +- No need for different limits per context +- Smart truncation ensures short names stay short +- Long names get intelligently truncated with preserved meaning + +## Migration + +**Before:** Different limits, dumb truncation + +```typescript +truncateFunctionName(name, 100); // Function lists +truncateFunctionName(name, 60); // Call trees +``` + +**After:** Uniform limit, smart truncation + +```typescript +truncateFunctionName(name, 120); // Everywhere +``` + +Callers don't need to change - just update the implementation of `truncateFunctionName()`. 
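+
+## Test Sketch (Phase 4)
+
+As a concrete starting point for Phase 4, a minimal Jest sketch could pin down the two properties that matter most: short names pass through untouched, and long names are elided in the middle while the trailing function name survives. The import path below is hypothetical (wherever `truncateFunctionName()` ends up living), and only structural properties are asserted, because the exact truncated strings depend on the prefix/suffix split described above:
+
+```typescript
+// Hypothetical module path; adjust to wherever truncateFunctionName() lands.
+import { truncateFunctionName } from './truncate-function-name';
+
+describe('truncateFunctionName', function () {
+  it('returns short names unchanged', function () {
+    expect(truncateFunctionName('RtlUserThreadStart', 120)).toBe(
+      'RtlUserThreadStart'
+    );
+  });
+
+  it('elides the middle but keeps the trailing function name', function () {
+    // Synthetic namespace-heavy name with a short suffix, similar in shape to
+    // the real-world examples above.
+    const name = 'mozilla::' + 'detail::'.repeat(30) + 'Widget::Paint()';
+    const result = truncateFunctionName(name, 120);
+    expect(result.length).toBeLessThanOrEqual(120);
+    expect(result).toContain('...');
+    expect(result.endsWith('Paint()')).toBe(true);
+  });
+});
+```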
diff --git a/jest.config.js b/jest.config.js index 8c54951c30..2b0c0ec7c8 100644 --- a/jest.config.js +++ b/jest.config.js @@ -56,5 +56,28 @@ module.exports = { printBasicPrototype: true, }, }, + + // ======================================================================== + // CLI Tests (Node.js environment) + // ======================================================================== + { + displayName: 'cli', + testMatch: ['/src/profile-query-cli/tests/**/*.test.ts'], + + // Use Node.js environment (not browser/jsdom) + testEnvironment: 'node', + + // CLI-specific setup (just jest-extended for matchers) + setupFilesAfterEnv: ['./src/profile-query-cli/tests/setup.ts'], + + // CLI operations can be slow (loading profiles, spawning processes) + testTimeout: 30000, + + // File extensions for CLI tests + moduleFileExtensions: ['ts', 'js'], + + // No need for asset mocks in CLI tests + // No transformIgnorePatterns needed - we don't use ESM-only deps here + }, ], }; diff --git a/json-output-plan.md b/json-output-plan.md new file mode 100644 index 0000000000..55f36fd2b8 --- /dev/null +++ b/json-output-plan.md @@ -0,0 +1,640 @@ +# JSON Output Support Plan for profile-query + +## Overview + +Support JSON output across all profile-query commands to enable piping output to `jq` and other JSON tools. The approach is to: + +1. Have ProfileQuerier methods return structured data objects instead of formatted strings +2. Send these structured objects across IPC +3. In the CLI, either print JSON (with `--json` flag) or format as plain text + +## Architecture Changes + +### 1. Protocol Changes (`profile-query-cli/protocol.ts`) + +Change the `ServerResponse` type from: + +```typescript +export type ServerResponse = + | { type: 'success'; result: string } + | { type: 'error'; error: string } + | { type: 'loading' } + | { type: 'ready' }; +``` + +To: + +```typescript +export type ServerResponse = + | { type: 'success'; result: CommandResult } + | { type: 'error'; error: string } + | { type: 'loading' } + | { type: 'ready' }; + +// CommandResult is a union of all possible result types +export type CommandResult = + | ProfileInfoResult + | ThreadInfoResult + | ThreadSamplesResult + | ThreadMarkersResult + | MarkerInfoResult + | MarkerStackResult + | FunctionExpandResult + | FunctionInfoResult + | ViewRangeResult + | StatusResult; +``` + +### 2. CLI Changes (`profile-query-cli/index.ts`) + +Add `--json` flag to CLI arguments: + +```typescript +boolean: ['daemon', 'help', 'h', 'all', 'has-stack', 'auto-group', 'json'], +``` + +After receiving result from `sendCommand()`, check for `--json` flag: + +```typescript +const result = await sendCommand(SESSION_DIR, { command: ... }, argv.session); + +if (argv.json) { + console.log(JSON.stringify(result, null, 2)); +} else { + const formatted = formatCommandResult(result); + console.log(formatted); +} +``` + +### 3. ProfileQuerier Changes (`profile-query/index.ts`) + +Change method signatures from `Promise` to `Promise` where `XxxResult` is a structured type. + +### 4. New Formatter Module + +Create `profile-query-cli/formatters.ts` that contains all plain-text formatting logic moved from ProfileQuerier and formatter modules. + +--- + +## Command-by-Command Plan + +### 1. 
`profile info` → `profileInfo()` + +**Current formatter:** `formatProfileInfo()` in `profile-query/formatters/profile-info.ts:19` + +**JSON Structure:** + +```typescript +interface ProfileInfoResult { + type: 'profile-info'; + name: string; + platform: string; + threadCount: number; + processCount: number; + + processes: Array<{ + processIndex: number; + pid: string; + name: string; + cpuMs: number; + startTime?: number; + startTimeName?: string; + endTime?: number | null; + endTimeName?: string | null; + + threads: Array<{ + threadIndex: number; + threadHandle: string; + name: string; + cpuMs: number; + }>; + + remainingThreads?: { + count: number; + combinedCpuMs: number; + maxCpuMs: number; + }; + }>; + + remainingProcesses?: { + count: number; + combinedCpuMs: number; + maxCpuMs: number; + }; + + cpuActivity: Array<{ + startTime: number; + startTimeName: string; + endTime: number; + endTimeName: string; + cpuMs: number; + depthLevel: number; + }> | null; +} +``` + +**Refactoring:** + +1. Extract data collection logic from `formatProfileInfo()` into new `collectProfileInfo()` function +2. Have `ProfileQuerier.profileInfo()` call `collectProfileInfo()` and return structured data +3. Move text formatting from `formatProfileInfo()` to `profile-query-cli/formatters.ts` + +--- + +### 2. `thread info` → `threadInfo()` + +**Current formatter:** `formatThreadInfo()` in `profile-query/formatters/thread-info.ts:33` + +**JSON Structure:** + +```typescript +interface ThreadInfoResult { + type: 'thread-info'; + threadHandle: string; + threadIndex: number; + name: string; + friendlyName: string; + createdAt: number; + createdAtName: string; + endedAt: number | null; + endedAtName: string | null; + sampleCount: number; + markerCount: number; + + cpuActivity: Array<{ + startTime: number; + startTimeName: string; + endTime: number; + endTimeName: string; + cpuMs: number; + depthLevel: number; + }> | null; +} +``` + +**Refactoring:** + +1. Extract data collection from `formatThreadInfo()` into new `collectThreadInfo()` function +2. Have `ProfileQuerier.threadInfo()` return structured data +3. Move text formatting to CLI formatters + +--- + +### 3. `thread samples` → `threadSamples()` + +**Current formatter:** `formatThreadSamples()` in `profile-query/formatters/thread-info.ts:134` + +**JSON Structure:** + +```typescript +interface ThreadSamplesResult { + type: 'thread-samples'; + threadHandle: string; + threadIndex: number; + friendlyThreadName: string; + + topFunctionsByTotal: Array<{ + functionHandle: string; + functionIndex: number; + name: string; + nameWithLibrary: string; + totalSamples: number; + totalPercentage: number; + library?: string; + }>; + + topFunctionsBySelf: Array<{ + functionHandle: string; + functionIndex: number; + name: string; + nameWithLibrary: string; + selfSamples: number; + selfPercentage: number; + library?: string; + }>; + + regularCallTree: CallTreeNode; + invertedCallTree: CallTreeNode | null; + + heaviestStack: { + selfSamples: number; + frameCount: number; + frames: Array<{ + funcIndex: number; + name: string; + nameWithLibrary: string; + }>; + }; +} + +interface CallTreeNode { + funcIndex: number; + name: string; + nameWithLibrary: string; + totalSamples: number; + totalPercentage: number; + selfSamples: number; + selfPercentage: number; + children: CallTreeNode[]; +} +``` + +**Refactoring:** + +1. Extract data from `formatThreadSamples()` - much of the data is already collected +2. Create helper to convert CallTree to JSON structure +3. 
Move formatting logic (truncation, indentation) to CLI +4. Note: `formatCallTree()` in `profile-query/formatters/call-tree.ts:15` will need data extraction variant + +--- + +### 4. `thread markers` → `threadMarkers()` + +**Current formatter:** `formatThreadMarkers()` in `profile-query/formatters/marker-info.ts:611` + +**JSON Structure:** + +```typescript +interface ThreadMarkersResult { + type: 'thread-markers'; + threadHandle: string; + threadIndex: number; + friendlyThreadName: string; + totalMarkerCount: number; + filteredMarkerCount: number; + + filters?: { + searchString?: string; + minDuration?: number; + maxDuration?: number; + category?: string; + hasStack?: boolean; + limit?: number; + }; + + byType: Array<{ + typeName: string; + count: number; + isInterval: boolean; + + durationStats?: { + min: number; + max: number; + avg: number; + median: number; + p95: number; + p99: number; + }; + + rateStats?: { + markersPerSecond: number; + minGap: number; + avgGap: number; + maxGap: number; + }; + + topMarkers: Array<{ + handle: string; + markerIndex: number; + label: string; + start: number; + duration?: number; + }>; + + subGroups?: MarkerGroupData[]; + subGroupKey?: string; + }>; + + byCategory: Array<{ + categoryName: string; + categoryIndex: number; + count: number; + percentage: number; + }>; + + customGroups?: MarkerGroupData[]; +} + +interface MarkerGroupData { + groupName: string; + count: number; + isInterval: boolean; + durationStats?: DurationStats; + rateStats?: RateStats; + topMarkers: Array<{ + handle: string; + markerIndex: number; + label: string; + start: number; + duration?: number; + }>; + subGroups?: MarkerGroupData[]; +} +``` + +**Refactoring:** + +1. Most aggregation logic in `formatThreadMarkers()` already produces structured data +2. Extract `MarkerTypeStats` and `MarkerGroup` interfaces to result types +3. Separate formatting lines from data aggregation +4. Move `formatMarkerGroups()` logic to CLI formatters + +--- + +### 5. `marker info` → `markerInfo()` + +**Current formatter:** `formatMarkerInfo()` in `profile-query/formatters/marker-info.ts:971` + +**JSON Structure:** + +```typescript +interface MarkerInfoResult { + type: 'marker-info'; + markerHandle: string; + markerIndex: number; + threadHandle: string; + threadIndex: number; + friendlyThreadName: string; + + name: string; + tooltipLabel?: string; + markerType?: string; + category: { + index: number; + name: string; + }; + + start: number; + end: number | null; + duration?: number; + + fields?: Array<{ + key: string; + label: string; + value: any; + formattedValue: string; + }>; + + schema?: { + description?: string; + }; + + stack?: StackTraceData; +} + +interface StackTraceData { + capturedAt?: number; + frames: Array<{ + funcIndex: number; + name: string; + nameWithLibrary: string; + library?: string; + }>; + truncated: boolean; +} +``` + +**Refactoring:** + +1. Extract data collection from `formatMarkerInfo()` +2. Separate stack formatting (limited to 20 frames) from full stack data +3. Move text formatting to CLI + +--- + +### 6. `marker stack` → `markerStack()` + +**Current formatter:** `formatMarkerStackFull()` in `profile-query/formatters/marker-info.ts:922` + +**JSON Structure:** + +```typescript +interface MarkerStackResult { + type: 'marker-stack'; + markerHandle: string; + markerIndex: number; + threadHandle: string; + threadIndex: number; + friendlyThreadName: string; + markerName: string; + + stack: StackTraceData | null; +} +``` + +**Refactoring:** + +1. 
Extract stack data from `formatMarkerStackFull()` +2. Reuse `StackTraceData` interface from marker info +3. Move formatting to CLI + +--- + +### 7. `function expand` → `functionExpand()` + +**Current formatting:** Inline in `ProfileQuerier.functionExpand()` at `profile-query/index.ts:267` + +**JSON Structure:** + +```typescript +interface FunctionExpandResult { + type: 'function-expand'; + functionHandle: string; + funcIndex: number; + threadHandle: string; + threadIndex: number; + + name: string; + fullName: string; + library?: string; +} +``` + +**Refactoring:** + +1. Return structured data instead of formatted string +2. Move text formatting to CLI + +--- + +### 8. `function info` → `functionInfo()` + +**Current formatting:** Inline in `ProfileQuerier.functionInfo()` at `profile-query/index.ts:303` + +**JSON Structure:** + +```typescript +interface FunctionInfoResult { + type: 'function-info'; + functionHandle: string; + funcIndex: number; + threadHandle: string; + threadIndex: number; + threadName: string; + + name: string; + isJS: boolean; + relevantForJS: boolean; + + resource?: { + name: string; + index: number; + }; + + library?: { + name: string; + path: string; + debugName?: string; + debugPath?: string; + breakpadId?: string; + }; +} +``` + +**Refactoring:** + +1. Return structured data instead of formatted string +2. Move text formatting to CLI + +--- + +### 9. `view push` → `pushViewRange()` + +**Current formatting:** Inline in `ProfileQuerier.pushViewRange()` at `profile-query/index.ts:121` + +**JSON Structure:** + +```typescript +interface ViewRangeResult { + type: 'view-range'; + action: 'push' | 'pop'; + + range: { + start: number; + startName: string; + end: number; + endName: string; + }; + + message: string; +} +``` + +**Refactoring:** + +1. Return structured data with range info +2. Move text formatting to CLI + +--- + +### 10. `view pop` → `popViewRange()` + +**Current formatting:** Inline in `ProfileQuerier.popViewRange()` at `profile-query/index.ts:179` + +**JSON Structure:** +Same as `ViewRangeResult` above. + +**Refactoring:** + +1. Return structured data +2. Move text formatting to CLI + +--- + +### 11. `status` → `getStatus()` + +**Current formatting:** Inline in `ProfileQuerier.getStatus()` at `profile-query/index.ts:226` + +**JSON Structure:** + +```typescript +interface StatusResult { + type: 'status'; + + selectedThread: { + threadHandle: string; + threadIndex: number; + name: string; + } | null; + + viewRanges: Array<{ + start: number; + startName: string; + end: number; + endName: string; + }>; + + rootRange: { + start: number; + end: number; + }; +} +``` + +**Refactoring:** + +1. Return structured data +2. Move text formatting to CLI + +--- + +## Implementation Strategy + +### Phase 1: Infrastructure + +1. Add result type definitions to `protocol.ts` +2. Add `--json` flag handling to CLI +3. Create `profile-query-cli/formatters.ts` module +4. Update daemon to handle new response types + +### Phase 2: Simple Commands (good starting points) + +1. `status` - simplest command +2. `function expand` - simple inline formatting +3. `function info` - simple inline formatting +4. `view push` / `view pop` - simple inline formatting + +### Phase 3: Medium Complexity + +1. `thread info` - single formatter, moderate data +2. `marker info` - moderate complexity +3. `marker stack` - can reuse marker info infrastructure + +### Phase 4: Complex Commands + +1. `profile info` - complex data structure with CPU activity +2. 
`thread samples` - involves call trees and multiple data sources +3. `thread markers` - most complex with grouping and aggregation + +### Phase 5: Testing & Polish + +1. Add tests for JSON output +2. Ensure backward compatibility +3. Update documentation + +--- + +## Migration Notes + +### Breaking Changes + +None - this is purely additive. Without `--json` flag, behavior is unchanged. + +### Backward Compatibility + +The IPC protocol change needs version checking. Add a protocol version field: + +```typescript +export interface SessionMetadata { + // ... existing fields ... + protocolVersion: number; // Add this +} +``` + +If client and daemon have mismatched protocol versions, show clear error message. + +### Testing + +1. Unit tests for each data collection function +2. Integration tests comparing JSON → formatted output with current text output +3. Golden file tests for JSON structure stability diff --git a/json-output-status.md b/json-output-status.md new file mode 100644 index 0000000000..163695a9ce --- /dev/null +++ b/json-output-status.md @@ -0,0 +1,248 @@ +# JSON Output Implementation Status + +## Goal + +Add JSON output support to all profile-query CLI commands to enable programmatic data processing with tools like `jq`. The implementation allows users to choose between human-readable text output (default) and structured JSON output (with `--json` flag). + +## Architecture + +The implementation uses a three-layer architecture: + +1. **ProfileQuerier methods** return structured TypeScript objects (e.g., `StatusResult`, `ThreadInfoResult`) instead of formatted strings +2. **IPC layer** passes these structured objects through the daemon unchanged +3. **CLI layer** either outputs JSON directly or formats the structured data as plain text using dedicated formatters + +### Key Files + +- `src/profile-query-cli/protocol.ts` - All `CommandResult` type definitions +- `src/profile-query-cli/formatters.ts` - Plain text formatters for each result type +- `src/profile-query-cli/index.ts` - CLI with `--json` flag and `formatOutput()` dispatcher +- `src/profile-query/index.ts` - ProfileQuerier methods returning structured results +- `src/profile-query/formatters/*.ts` - Data collection helpers that extract structured data + +## Current Status + +### ✅ All Commands Complete (11 of 11) + +All profile-query CLI commands now support both plain text and JSON output modes. + +### ✅ Infrastructure (Complete) + +All foundational work is complete: + +- ✅ Added all `CommandResult` type definitions to `protocol.ts` +- ✅ Updated `ServerResponse` to support `string | CommandResult` +- ✅ Added `--json` flag to CLI with `formatOutput()` dispatcher +- ✅ Created `formatters.ts` module in CLI for plain text formatting +- ✅ Updated daemon `processCommand()` to pass through structured results + +### ✅ Converted Commands (11 of 11) + +#### Simple Commands (4/4 complete) + +1. **status** (`getStatus()` → `StatusResult`) + - Returns session status, selected thread, view ranges + - Formatter: `formatStatusResult()` + +2. **function expand** (`functionExpand()` → `FunctionExpandResult`) + - Returns full untruncated function name with library + - Formatter: `formatFunctionExpandResult()` + +3. **function info** (`functionInfo()` → `FunctionInfoResult`) + - Returns detailed function info including resource and library details + - Formatter: `formatFunctionInfoResult()` + +4. 
**view push/pop** (`pushViewRange()`, `popViewRange()` → `ViewRangeResult`) + - Returns view range operation results with timestamps + - Formatter: `formatViewRangeResult()` + +#### Medium Commands (7/7 complete) + +5. **thread info** (`threadInfo()` → `ThreadInfoResult`) + - Returns thread details with structured CPU activity data + - Created `CpuActivityEntry` interface and `collectSliceTree()` helper + - Formatter: `formatThreadInfoResult()` + +6. **thread samples** (`threadSamples()` → `ThreadSamplesResult`) + - Returns call trees (regular and inverted) and top function lists + - Created `collectCallTree()` and `collectCallTreeNode()` helpers + - Formatter: `formatThreadSamplesResult()` + +7. **profile info** (`profileInfo()` → `ProfileInfoResult`) + - Returns process/thread tree and CPU activity + - Formatter: `formatProfileInfoResult()` + +8. **marker stack** (`markerStack()` → `MarkerStackResult`) + - Returns full stack trace data for a marker + - Formatter: `formatMarkerStackResult()` + +9. **marker info** (`markerInfo()` → `MarkerInfoResult`) + - Returns marker fields, timing, and optional stack trace + - Formatter: `formatMarkerInfoResult()` + +10. **thread markers** (`threadMarkers()` → `ThreadMarkersResult`) + - Returns aggregated statistics by type and category + - Includes duration/rate stats, filtering, and custom grouping + - Formatter: `formatThreadMarkersResult()` + +## Proven Conversion Pattern + +Every command follows the same 5-step pattern: + +### Step 1: Read Current Implementation + +Identify where the current formatting happens: + +```bash +# Find the method in ProfileQuerier +grep -n "async commandName" src/profile-query/index.ts + +# Find the formatter function +grep -n "formatCommandName" src/profile-query/formatters/ +``` + +### Step 2: Extract Data Collection + +In the formatter file (e.g., `formatters/thread-info.ts`): + +```typescript +// Add new function that collects data without formatting +export function collectThreadInfo(...): ThreadInfoResult { + // Reuse existing data collection logic + const state = store.getState(); + const thread = threadSelectors.getRawThread(state); + // ... collect all data ... + + return { + type: 'thread-info', + // ... structured data ... + }; +} +``` + +Key principle: **Extract, don't duplicate**. The data collection logic already exists in the formatter - just separate it from the string concatenation. + +### Step 3: Update ProfileQuerier Method + +In `src/profile-query/index.ts`: + +```typescript +// Change return type and call new collector +async threadInfo(threadHandle?: string): Promise { + return collectThreadInfo( + this._store, + this._timestampManager, + this._threadMap, + threadHandle + ); +} +``` + +### Step 4: Add CLI Formatter + +In `src/profile-query-cli/formatters.ts`: + +```typescript +export function formatThreadInfoResult(result: ThreadInfoResult): string { + // Convert structured data back to human-readable text + // This is basically the string concatenation logic from the old formatter + return `Name: ${result.friendlyName}\n...`; +} +``` + +### Step 5: Wire Up CLI Dispatcher + +In `src/profile-query-cli/index.ts`: + +```typescript +// Add formatter import +import { formatThreadInfoResult } from './formatters'; + +// Add case to formatOutput() switch +switch (result.type) { + case 'thread-info': + return formatThreadInfoResult(result); + // ... 
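  // …one case per CommandResult type, e.g. 'status' → formatStatusResult(result).
  // When --json is passed, formatOutput() is not consulted and the structured
  // result is printed directly (assumption about the exact mechanism).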
+} +``` + +### Step 6: Test and Commit + +```bash +yarn lint-fix +yarn ts +yarn test profile-query +yarn test:cli +jj commit -m "Implement JSON support for X command" +``` + +## Common Patterns and Helpers + +### CPU Activity Data + +For commands that show CPU activity (profile info, thread info): + +- Use `collectSliceTree()` from `cpu-activity.ts` +- Returns array of `CpuActivityEntry` with timestamps and nesting depth +- Plain text formatter recreates the indented tree structure + +### Stack Traces + +For markers with stacks: + +- Define `StackTraceData` interface with frames array +- Each frame has `funcIndex`, `name`, `nameWithLibrary`, `library` +- Include `truncated` boolean if stack was limited +- Plain text formatter handles truncation display + +### Nested Structures + +For hierarchical data (processes/threads, marker groups): + +- Use arrays of objects with optional nested arrays +- Plain text formatter uses indentation to show hierarchy +- JSON output preserves full structure naturally + +## Testing Strategy + +For each converted command: + +1. **Type checking**: `yarn ts` should pass +2. **Unit tests**: `yarn test profile-query` should pass +3. **Integration tests**: `yarn test:cli` should pass +4. **Manual testing** (optional but recommended): + + ```bash + # Build and start a session + yarn build-profile-query-cli + pq load path/to/profile.json + + # Test plain text output (should match old behavior) + pq thread info + + # Test JSON output + pq thread info --json + pq thread info --json | jq '.cpuActivity[0]' + ``` + +## Benefits of This Approach + +1. **No breaking changes** - Default behavior unchanged +2. **Type safety** - Full TypeScript types for all JSON structures +3. **Reusable data** - JSON can be piped to jq, saved to files, etc. +4. **Maintainable** - Clear separation between data collection and formatting +5. **Testable** - Structured data easier to test than formatted strings +6. **Documented** - JSON structure serves as API documentation + +## Success Criteria + +✅ **All success criteria met:** + +- ✅ All 11 commands return structured `CommandResult` types +- ✅ All commands work with `--json` flag +- ✅ All commands work without `--json` flag (backward compatible) +- ✅ All tests pass (`yarn test profile-query` and `yarn test:cli`) +- ✅ Type checking passes (`yarn ts`) +- ✅ Linting passes (`yarn lint-fix`) + +The JSON output implementation is now complete. All profile-query CLI commands support both human-readable text output (default) and structured JSON output (with `--json` flag) for programmatic processing. diff --git a/marker-support-plan.md b/marker-support-plan.md new file mode 100644 index 0000000000..5149b94dd1 --- /dev/null +++ b/marker-support-plan.md @@ -0,0 +1,816 @@ +# Marker Support Implementation Plan for `pq` CLI + +## Overview + +This document outlines the implementation plan for adding comprehensive marker support to the profile-query CLI (`pq`). Markers provide ~50% of profiling insight (Layout/Reflow, JavaScript names, IPC messages, GPU boundaries) and are a critical missing feature. + +## Background + +### Marker Data Model (from codebase analysis) + +**Core Types:** + +- `Marker`: `{ start: ms, end: ms | null, name: string, category: number, data: MarkerPayload | null, threadId: Tid | null }` +- `MarkerPayload`: Union of 30+ specific payload types (Network, GCMajor, FileIO, IPC, DOMEvent, etc.) 
+- `MarkerSchema`: Defines display rules (`tooltipLabel`, `tableLabel`, `chartLabel`, `fields[]`, `display[]`, `description`) + +**Web UI Views:** + +1. **Marker Chart**: Rows grouped by (category, name), shows rectangles for intervals or diamonds for instants + - Each marker has a `chartLabel` (can be templated from payload data) + - Markers filtered by `display: ['marker-chart']` in schema +2. **Marker Table**: One row per marker with columns: start, end, name, tableLabel + - `tableLabel` is templated from marker schema +3. **Tooltip/Sidebar**: Shows `tooltipLabel`, all field key-value pairs, type description, stack trace if available + +**Key Properties:** + +- Instant markers: `end === null` +- Interval markers: `end !== null` +- Markers can have stacks (cause backtraces) +- Markers can have rich structured data (via MarkerSchema fields) +- Tens of thousands of markers per thread is common + +## Problem Statement + +We cannot naively print all markers and their fields to the CLI because: + +1. Profiles often contain 10,000+ markers per thread +2. Each marker can have 5-15 fields with verbose values +3. This would overwhelm both the reader and the LLM context window +4. Different marker types need different presentation strategies + +## Design Principles + +1. **Aggregation First**: Show summaries, not raw data +2. **Progressive Disclosure**: Start with overview, allow drilling down +3. **Context-Aware Grouping**: Group markers intelligently based on their characteristics +4. **Actionable Insights**: Present data that helps diagnose performance issues +5. **Format Flexibility**: Support both human-readable and machine-parseable output + +## Implementation Phases + +### Phase 1: Basic Marker Listing (MVP) + +**Goal**: Show high-level marker distribution and basic statistics + +**Commands:** + +```bash +pq thread markers # List marker groups with counts +pq thread markers --summary # Show aggregate statistics +pq marker info # Show details for a specific marker +``` + +**`pq thread markers` output:** + +``` +Markers in thread t-93 (Renderer) — 14,523 markers + +By Type (top 15): + Reflow 4,234 markers (interval: min=0.12ms, avg=2.3ms, max=45.2ms) + DOMEvent 3,891 markers (interval: min=0.01ms, avg=0.5ms, max=12.1ms) + Styles 2,456 markers (interval: min=0.05ms, avg=1.2ms, max=8.7ms) + JavaScript 1,823 markers (instant) + Paint 892 markers (interval: min=0.3ms, avg=5.1ms, max=23.4ms) + Network 234 markers (interval: min=5.2ms, avg=234.5ms, max=2341.2ms) + GCSlice 156 markers (interval: min=0.8ms, avg=12.3ms, max=156.7ms) + IPC (IPCOut) 89 markers (interval: min=0.01ms, avg=2.1ms, max=45.2ms) + ... (7 more types) + +By Category: + Layout 6,892 markers (47.5%) + JavaScript 4,234 markers (29.1%) + Graphics 1,456 markers (10.0%) + ... (4 more categories) + +Rate Analysis (markers/second): + DOMEvent: 45.2 markers/sec (rate: min=0.5ms, avg=22.1ms, max=2341ms) + Reflow: 12.3 markers/sec (rate: min=1.2ms, avg=81.2ms, max=5234ms) + Styles: 8.9 markers/sec (rate: min=2.1ms, avg=112.4ms, max=8912ms) + +Use --type to filter, --details for per-marker info, or m- handles to inspect individual markers. 
+``` + +**`pq thread markers --summary` output:** + +``` +Marker Summary for thread t-93 (Renderer) + +Total markers: 14,523 +Time range: 2.145s - 15.891s (13.746s) +Marker types: 22 +Marker categories: 7 + +Instant markers: 2,891 (19.9%) +Interval markers: 11,632 (80.1%) + +Duration statistics (interval markers only): + Min: 0.01ms + Avg: 3.4ms + Median: 1.2ms + P95: 12.3ms + P99: 45.6ms + Max: 234.5ms + +Longest intervals: + m-1234: Paint - 234.5ms (7.234s - 7.469s) + m-5678: GCMajor - 156.7ms (10.123s - 10.280s) + m-3456: Reflow - 89.3ms (12.456s - 12.545s) + m-7890: Network (https://example.com/api) - 2341.2ms (3.234s - 5.575s) + m-2345: DOMEvent (click) - 45.2ms (8.123s - 8.168s) +``` + +**Implementation Tasks:** + +- [x] Create `formatters/marker-info.ts` for marker formatting logic +- [x] Implement `MarkerMap` class (similar to `ThreadMap`, `FunctionMap`) +- [x] Add marker aggregation functions (group by type, category, compute stats) +- [x] Add `ProfileQuerier.threadMarkers()` method +- [x] Add `ProfileQuerier.markerInfo(markerHandle)` method +- [x] Wire up in protocol.ts, daemon.ts, index.ts +- [x] Add unit tests for utility functions (formatDuration, computeDurationStats, computeRateStats) + +**Status: ✅ COMPLETE** (Commits: 25eaf637, 63e5d2d7) + +**Data Structures:** + +```typescript +// MarkerMap for handle management +class MarkerMap { + private markers: Marker[] = []; + private handleToIndex: Map = new Map(); + private indexToHandle: Map = new Map(); + + registerMarker(marker: Marker, index: MarkerIndex): string; + getMarker(handle: string): Marker | null; + getMarkers(): Marker[]; +} + +// Aggregation structures +interface MarkerTypeStats { + typeName: string; + count: number; + isInterval: boolean; + durationStats?: DurationStats; + rateStats?: RateStats; + topMarkers: Array<{ handle: string; label: string; duration?: number }>; +} + +interface DurationStats { + min: number; + max: number; + avg: number; + median: number; + p95: number; + p99: number; +} + +interface RateStats { + markersPerSecond: number; + minGap: number; + avgGap: number; + maxGap: number; +} +``` + +### Phase 2: Filtering and Search + +**Goal**: Allow users to filter markers by various criteria + +**Status: ✅ COMPLETE** (Commits: 1e478dcc, 43ccb20c) + +**What was implemented:** + +Phase 2 provides both text-based search and duration-based filtering: + +**Search filtering** leverages the existing marker search functionality built into the profiler codebase. 
This approach: + +- Reuses tested, proven code from the web UI +- Avoids duplicating complex filtering logic +- Provides a simple, flexible search interface + +**Duration filtering** is implemented by filtering marker indexes after search filtering: + +- Filters markers by minimum and/or maximum duration in milliseconds +- Excludes instant markers when duration constraints are specified +- Supports combination with search filtering + +**Commands:** + +```bash +pq thread markers --search DOMEvent # Search for "DOMEvent" markers +pq thread markers --search Stack # Search for markers with "Stack" in name +pq thread markers --category Graphics # Filter by Graphics category +pq thread markers --category GC # Partial match: matches "GC / CC" +pq thread markers --has-stack # Only markers with stack traces +pq thread markers --min-duration 10 # Markers with duration >= 10ms +pq thread markers --max-duration 100 # Markers with duration <= 100ms +pq thread markers --limit 1000 # Limit to first 1000 markers +pq thread markers --min-duration 5 --max-duration 50 # Markers between 5-50ms +pq thread markers --category Other --min-duration 10 # Combined category and duration +pq thread markers --has-stack --category Layout --min-duration 1 # All filters combined +pq thread markers --category Layout --limit 50 # Limit after filtering +pq thread markers --search Reflow --min-duration 5 # Combined search and duration +``` + +**Output example:** + +```bash +$ pq thread markers --search DOMEvent + +Markers in thread t-0 (Parent Process) — 2849 markers (filtered from 258060) + +By Type (top 15): + DOMEvent 2849 markers (interval: min=0.40μs, avg=14.53μs, max=2.25ms) + +By Category: + DOM 2849 markers (100.0%) + +Rate Analysis (markers/second): + DOMEvent: 93.7 markers/sec (rate: min=2.40μs, avg=10.68ms, max=3.37s) + +Use --search to filter markers, or m- handles to inspect individual markers. +``` + +**Implementation approach:** + +```typescript +// Use built-in marker search instead of custom filtering +if (searchString) { + store.dispatch(changeMarkersSearchString(searchString)); +} +const filteredIndexes = searchString + ? 
threadSelectors.getSearchFilteredMarkerIndexes(state) + : threadSelectors.getFullMarkerListIndexes(state); +// Always clear search after use +store.dispatch(changeMarkersSearchString('')); +``` + +**Completed Tasks:** + +- [x] Add `--search` parameter to CLI +- [x] Wire up `changeMarkersSearchString` action dispatch +- [x] Use `getSearchFilteredMarkerIndexes` selector for filtered results +- [x] Add try/finally block to ensure search string is cleared +- [x] Update protocol, daemon, and CLI to pass search string +- [x] Show "(filtered from N)" when filters are active +- [x] Update help text with search examples +- [x] Add `--min-duration` and `--max-duration` parameters to CLI +- [x] Implement duration filtering by whittling down filtered marker indexes +- [x] Add validation for duration parameter inputs +- [x] Update help text with duration filtering examples +- [x] Add `--category` parameter to CLI for filtering by category name +- [x] Implement case-insensitive substring matching for category filtering +- [x] Update help text with category filtering examples + +**Additional enhancements (implemented):** + +- [x] `--min-duration ` - Filter by minimum duration (Committed: 43ccb20c) +- [x] `--max-duration ` - Filter by maximum duration (Committed: 43ccb20c) +- [x] `--category ` - Filter by category name with case-insensitive substring matching (Committed: 7a44d4f9) +- [x] `--has-stack` - Only show markers with stack traces (Committed: bd52b911) +- [x] `--limit ` - Limit the number of markers used in aggregation (Committed: pending) + +Duration filtering is implemented by filtering the marker indexes after search filtering. Instant markers (markers with no end time) are excluded when duration constraints are specified. + +Category filtering uses case-insensitive substring matching on category names, allowing partial matches (e.g., "GC" matches "GC / CC"). + +Stack filtering checks for markers that have a `cause` field in their data payload, which contains stack trace information. + +Limit caps the number of markers used in aggregation after all other filters are applied. This is useful for quickly examining a subset of markers from large profiles. 
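Taken together, the duration and limit steps described above reduce to a single pass over the already search-filtered marker indexes. A minimal sketch (the helper and the `getMarker` accessor are hypothetical; the `start`/`end` shape follows the marker model in the Background section above):

```typescript
interface MarkerLike {
  start: number; // ms
  end: number | null; // null for instant markers
}

function applyDurationAndLimit(
  indexes: number[],
  getMarker: (index: number) => MarkerLike,
  minDuration?: number,
  maxDuration?: number,
  limit?: number
): number[] {
  let result = indexes;

  if (minDuration !== undefined || maxDuration !== undefined) {
    // Instant markers carry no duration, so any duration constraint excludes them.
    result = result.filter((index) => {
      const marker = getMarker(index);
      if (marker.end === null) {
        return false;
      }
      const duration = marker.end - marker.start;
      if (minDuration !== undefined && duration < minDuration) {
        return false;
      }
      if (maxDuration !== undefined && duration > maxDuration) {
        return false;
      }
      return true;
    });
  }

  // The limit caps the markers used for aggregation after all other filters.
  if (limit !== undefined && result.length > limit) {
    result = result.slice(0, limit);
  }

  return result;
}
```

Keeping this as a pure function over marker indexes lets it compose with the search filtering shown earlier without touching any Redux state.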
+ +**Future enhancements (not yet implemented):** + +The following filtering options could be added in the future: + +- [ ] `--field :` - Filter by field values + +### Phase 3: Smart Grouping and Sub-grouping + +**Goal**: Handle markers with similar names but different characteristics + +**Status: ✅ COMPLETE** (Commits: 7c64ef07, ae17f140) + +**What was implemented:** + +Phase 3 provides multi-level marker grouping with both manual and automatic grouping strategies: + +**Custom grouping** via `--group-by` allows hierarchical grouping by any combination of: + +- `type`: Marker type (data.type) +- `name`: Marker name +- `category`: Category name +- `field:`: Any marker field (e.g., `field:eventType`, `field:phase`) + +**Auto-grouping** via `--auto-group` uses a smart heuristic to automatically select the best field for sub-grouping: + +- Prefers fields with 3-20 unique values (ideal grouping range) +- Avoids fields with too many unique values (>50, likely IDs or timestamps) +- Requires fields to appear in >80% of markers +- Boosts score for fields that appear in all markers + +**Commands:** + +```bash +pq thread markers --group-by type,name # Group by type, then name +pq thread markers --group-by type,field:eventType # Group by type, then eventType field +pq thread markers --group-by category,type # Group by category, then type +pq thread markers --auto-group # Automatic smart grouping +pq thread markers --search DOMEvent --group-by field:eventType # Filter + custom grouping +``` + +**Output example:** + +```bash +$ pq thread markers --search DOMEvent --auto-group --limit 200 + +Markers in thread t-0 (Parent Process) — 200 markers (filtered from 258060) + +By Type (top 15): + DOMEvent 200 markers (interval: min=0.60μs, avg=41.65μs, max=2.06ms) + pointermove 59 markers (interval: min=1.30μs, avg=2.82μs, max=4.50μs) + mousemove 59 markers (interval: min=16.00μs, avg=27.55μs, max=84.30μs) + MozAfterPaint 16 markers (interval: min=1.30μs, avg=4.41μs, max=8.50μs) + mouseenter 15 markers (interval: min=0.60μs, avg=1.07μs, max=3.20μs) + mouseleave 13 markers (interval: min=0.60μs, avg=1.25μs, max=3.10μs) + ... +``` + +**Completed Tasks:** + +- [x] Implement multi-level grouping (group by multiple keys) +- [x] Add auto-grouping heuristic (analyze marker field variance with smart scoring) +- [x] Add `--group-by` flag with support for `type`, `name`, `category`, `field:` +- [x] Implement sub-group statistics (per-group duration/rate stats) +- [x] Add hierarchical display with proper indentation +- [x] Limit recursive depth to 3 levels to prevent excessive nesting + +### Phase 4: Marker Details and Field Display + +**Goal**: Show detailed information for individual markers + +**Status: ✅ COMPLETE** (Commits: 77c95d9d, d9817b0d) + +**What was implemented:** + +Phase 4 provides comprehensive marker inspection with detailed field display and complete stack trace viewing. Marker handles are now visible in marker listings, making it easy to inspect specific markers. 
+ +**Commands:** + +```bash +pq marker info # Full marker details with stack preview +pq marker stack # Complete stack trace (all frames) +``` + +**Actual output:** + +```bash +$ pq thread markers --has-stack --limit 3 + +Markers in thread t-0 (Parent Process) — 3 markers (filtered from 258060) + +By Type (top 15): + TextStack 1 markers (interval: min=651.20μs, avg=651.20μs, max=651.20μs) + Examples: m-1 (651.20μs) + Text 1 markers (interval: min=1.93ms, avg=1.93ms, max=1.93ms) + Examples: m-2 (1.93ms) + FlowMarker 1 markers (instant) + Examples: m-3 + +$ pq marker info m-1 + +Marker m-1: NotifyObservers - NotifyObservers + +Type: TextStack +Category: Other +Time: 1h2m - 1h2m (651.20μs) +Thread: t-0 (Parent Process) + +Fields: + Details: profiler-started + +Stack trace: + Captured at: 1h2m + [1] xul.dll!NotifyObservers(char const*, nsISupports*) + [2] xul.dll!NotifyProfilerStarted(mozilla::PowerOfTwo const&, mozilla::Maybe) + [3] xul.dll!profiler_start(mozilla::PowerOfTwo) + ... + [20] xul.dll!js::InternalCallOrConstruct(JSContext*, JS::CallArgs const&, js::) + ... (101 more frames) + +Use 'pq marker stack m-1' for the full stack trace. + +$ pq marker stack m-1 + +Stack trace for marker m-1: NotifyObservers + +Thread: t-0 (Parent Process) +Captured at: 1h2m + + [1] xul.dll!NotifyObservers(char const*, nsISupports*) + [2] xul.dll!NotifyProfilerStarted(mozilla::PowerOfTwo const&, mozilla::Maybe) + ... + [120] ntdll.dll!RtlUserThreadStart + [121] (root) +``` + +**Implementation details:** + +- **Marker handles visible**: Top 3 example markers shown for each type with handles and durations +- **`pq marker info`**: Shows full marker details with stack trace preview (first 20 frames) +- **`pq marker stack`**: Displays complete stack traces without frame limit +- **Stack formatting**: Reuses formatFunctionNameWithLibrary() for consistent display with library names +- **MarkerSchema integration**: Fields formatted using existing MarkerSchema formatters from web UI + +**Implementation Tasks:** + +- [x] Implement `markerInfo(handle)` method +- [x] Format marker fields using MarkerSchema formatters +- [x] Add stack trace formatting (walks stack table, formats with library names) +- [x] Implement `markerStack(handle)` method for full stack traces +- [x] Display marker handles in listings (top 3 examples per type) +- [x] Wire up in protocol.ts, daemon.ts, index.ts +- [x] Update CLI help text and examples + +**Future enhancements (not yet implemented):** + +- [ ] `pq marker expand ` - Show full field values for truncated fields +- [ ] `--format json` option for machine-readable output + +### Phase 5: Temporal Visualization (ASCII Charts) + +**Goal**: Provide a compact visual representation of marker timing + +**Commands:** + +```bash +pq thread markers --timeline # ASCII timeline +pq thread markers --type Reflow --timeline # Timeline for specific type +pq thread markers --histogram # Duration histogram +``` + +**Output example:** + +```bash +$ pq thread markers --type Reflow --timeline + +Reflow markers timeline (thread t-93, 2.145s - 15.891s, 13.746s total) + +Duration histogram (4,234 markers): + 0-1ms ████████████████████████████████████████ 1,892 (44.7%) + 1-2ms ████████████████████ 956 (22.6%) + 2-5ms ████████████ 587 (13.9%) + 5-10ms ██████ 324 (7.7%) + 10-20ms ███ 234 (5.5%) + 20-50ms ██ 189 (4.5%) + 50-100ms █ 45 (1.1%) + 100ms+ █ 7 (0.2%) + +Timeline (each char = 137ms, | = marker): +2.1s | | || | | | | || | | | | | 3.5s +3.5s | | | || ||| | | | | || | | | | 4.9s +4.9s | ||| | | | || | | | || | 
| | | 6.3s +6.3s | | | ||| | | | | | | | | | | | | 7.7s +7.7s | || | | | | || | | | || | | | | 9.1s +9.1s | | | || | | | | | | || | | | | | 10.5s +10.5s | | | | || | | || | | | | || | | | 11.9s +11.9s | | | || | | | | | | || | | | | | 13.3s +13.3s | | | | | || | | | || | | | | | | | | 14.7s +14.7s | || | | | | | || | | | | | | | | | 15.9s + +Density over time (markers per second): + 2-4s: 12.3/s ████████ + 4-6s: 18.7/s ████████████ + 6-8s: 8.9/s ██████ + 8-10s: 23.4/s ███████████████ + 10-12s: 15.6/s ██████████ + 12-14s: 11.2/s ███████ + 14-16s: 6.7/s ████ + +Peak activity: 8.123s - 8.456s (23 markers in 333ms window) +``` + +**Implementation Tasks:** + +- [ ] Implement ASCII timeline generator +- [ ] Implement duration histogram generator +- [ ] Add density analysis (markers per time bucket) +- [ ] Make timeline resolution configurable (auto-adjust to terminal width) +- [ ] Add `--width` flag to control chart width + +### Phase 6: Advanced Analysis Features + +**Goal**: Provide deeper insights into marker patterns + +**Commands:** + +```bash +pq thread markers --rate-analysis # Analyze marker rate patterns +pq thread markers --type Network --waterfall # Network waterfall chart +pq thread markers --overlap-analysis # Find overlapping markers +pq thread markers --critical-path # Identify critical path markers +``` + +**Rate Analysis Output:** + +```bash +$ pq thread markers --type DOMEvent --rate-analysis + +Rate analysis for DOMEvent markers (thread t-93) + +Overall rate: 45.2 markers/sec + +Inter-marker gaps (time between successive markers): + Min: 0.5ms + Avg: 22.1ms + Median: 18.7ms + P95: 89.3ms + P99: 234.5ms + Max: 2341.2ms + +Burst detection (3+ markers within 50ms): + Burst at 8.123s: 5 markers in 23ms (click cascade) + Burst at 10.234s: 8 markers in 45ms (scroll events) + Burst at 12.456s: 4 markers in 31ms (mousemove cluster) + ... (12 more bursts) + +Idle periods (>1000ms without markers): + 1.234s - 2.567s (1333ms) + 5.678s - 7.123s (1445ms) + 13.890s - 15.234s (1344ms) +``` + +**Network Waterfall Output:** + +```bash +$ pq thread markers --type Network --waterfall + +Network waterfall (thread t-93, 50 requests) + +Time Duration Status URL +2.145s ████████ 200 https://example.com/api/users +2.234s ██ 200 https://cdn.example.com/logo.png +2.267s ███ 200 https://cdn.example.com/style.css +2.289s ████ 200 https://cdn.example.com/app.js +2.345s ████████████ 200 https://api.example.com/data?page=1 +2.456s ██ 304 https://cdn.example.com/font.woff2 +... + +Legend: + ████ = Request (DNS + Connect + Request + Response) + Each █ = ~50ms +``` + +**Implementation Tasks:** + +- [ ] Implement rate analysis (gap statistics, burst detection) +- [ ] Implement overlap detection (find concurrent markers) +- [ ] Add network waterfall visualization +- [ ] Add critical path analysis (longest marker chains) +- [ ] Add `--export` flag to save analysis to JSON/CSV + +## Technical Implementation Details + +### Component Structure + +``` +src/profile-query/ +├── index.ts # ProfileQuerier class +├── formatters/ +│ ├── marker-info.ts # Marker listing/summary formatters +│ ├── marker-details.ts # Individual marker detail formatter +│ ├── marker-timeline.ts # ASCII timeline/histogram generators +│ └── marker-analysis.ts # Advanced analysis formatters +├── marker-map.ts # MarkerMap handle manager +├── marker-aggregator.ts # Marker aggregation logic +├── marker-filter.ts # Marker filtering logic +└── marker-grouping.ts # Smart grouping heuristics +``` + +### Key Algorithms + +**1. 
Marker Aggregation** + +```typescript +function aggregateMarkersByType( + markers: Marker[], + markerSchemaByName: MarkerSchemaByName +): MarkerTypeStats[] { + const groups = new Map(); + + for (const marker of markers) { + const type = marker.data?.type ?? 'Unknown'; + if (!groups.has(type)) { + groups.set(type, []); + } + groups.get(type)!.push(marker); + } + + return Array.from(groups.entries()).map(([type, markers]) => ({ + typeName: type, + count: markers.length, + isInterval: markers[0].end !== null, + durationStats: computeDurationStats(markers), + rateStats: computeRateStats(markers), + topMarkers: selectTopMarkers(markers, 5), + })); +} +``` + +**2. Duration Statistics** + +```typescript +function computeDurationStats(markers: Marker[]): DurationStats | undefined { + const durations = markers + .filter((m) => m.end !== null) + .map((m) => m.end! - m.start) + .sort((a, b) => a - b); + + if (durations.length === 0) return undefined; + + return { + min: durations[0], + max: durations[durations.length - 1], + avg: durations.reduce((a, b) => a + b, 0) / durations.length, + median: durations[Math.floor(durations.length / 2)], + p95: durations[Math.floor(durations.length * 0.95)], + p99: durations[Math.floor(durations.length * 0.99)], + }; +} +``` + +**3. Rate Statistics** + +```typescript +function computeRateStats(markers: Marker[]): RateStats { + const sorted = [...markers].sort((a, b) => a.start - b.start); + const gaps: number[] = []; + + for (let i = 1; i < sorted.length; i++) { + gaps.push(sorted[i].start - sorted[i - 1].start); + } + + const timeRange = sorted[sorted.length - 1].start - sorted[0].start; + const markersPerSecond = (markers.length / timeRange) * 1000; + + return { + markersPerSecond, + minGap: Math.min(...gaps), + avgGap: gaps.reduce((a, b) => a + b, 0) / gaps.length, + maxGap: Math.max(...gaps), + }; +} +``` + +**4. Smart Grouping** + +```typescript +function autoGroupMarkers( + markers: Marker[], + schema: MarkerSchema +): GroupingStrategy { + // Analyze variance in marker fields + const fieldVariance = analyzeFieldVariance(markers, schema); + + // If a field has high variance (e.g., eventType in DOMEvent markers), + // use it as a grouping key + const highVarianceFields = fieldVariance + .filter((f) => f.uniqueRatio > 0.3) // >30% unique values + .sort((a, b) => b.uniqueRatio - a.uniqueRatio); + + if (highVarianceFields.length > 0) { + return { type: 'field', field: highVarianceFields[0].key }; + } + + // Fall back to type-level grouping + return { type: 'type' }; +} +``` + +### Protocol Updates + +**`protocol.ts`:** + +```typescript +export type ClientCommand = + | { command: 'thread'; subcommand: 'markers'; options?: MarkerListOptions } + | { command: 'marker'; subcommand: 'info'; marker: string } + | { command: 'marker'; subcommand: 'stack'; marker: string } + | { command: 'marker'; subcommand: 'expand'; marker: string; field: string }; +// ... 
existing commands + +interface MarkerListOptions { + type?: string; // Filter by type + category?: string; // Filter by category + minDuration?: number; // Min duration in ms + maxDuration?: number; // Max duration in ms + nameFilter?: string; // Regex for name + fieldFilter?: string; // Format: "field:value" + hasStack?: boolean; // Only markers with stacks + groupBy?: string; // Grouping strategy + timeline?: boolean; // Show ASCII timeline + histogram?: boolean; // Show duration histogram + summary?: boolean; // Show summary only + limit?: number; // Limit output lines + format?: 'text' | 'json'; // Output format +} +``` + +## Testing Strategy + +1. **Unit Tests** (`src/test/unit/profile-query-markers.test.ts`): + - Test marker aggregation functions + - Test filtering logic + - Test statistics calculations + - Test ASCII chart generation + +2. **Integration Tests** (`yarn test:cli`): + - Test marker listing with real profiles + - Test filtering combinations + - Test grouping strategies + - Test marker detail display + +3. **Manual Testing**: + - Test with large profiles (10k+ markers) + - Test with different marker types + - Test edge cases (no markers, single marker, all instant/interval) + +## Performance Considerations + +1. **Lazy Marker Loading**: Don't load all marker details unless needed +2. **Pagination**: For large result sets, support pagination (e.g., `--page 2 --page-size 50`) +3. **Streaming Output**: For very large listings, stream output instead of buffering +4. **Caching**: Cache aggregated stats in ProfileQuerier for repeated queries +5. **Sampling**: For >10k markers, consider sampling for histogram/timeline + +## Open Questions / Design Decisions + +1. **Handle Persistence**: Should marker handles (m-N) be stable across sessions, or ephemeral? + - **Decision**: Ephemeral within session (like function handles), reset on each `pq load` + +2. **Default Grouping**: What should be the default grouping strategy? + - **Decision**: Group by type first, with option to drill down + +3. **Timeline Resolution**: How to auto-adjust timeline character width? + - **Decision**: Divide time range by terminal width (default 80 chars), cap at 1 char = 10ms minimum + +4. **Field Display**: Should we show all fields by default or only non-hidden fields? + - **Decision**: Follow marker schema `hidden` flag, add `--all-fields` to override + +5. **Stack Traces**: Should stacks be shown inline or require separate command? + - **Decision**: Show truncated stack (top 5 frames) in `marker info`, full stack in `marker stack` + +6. **JSON Output**: What should the JSON schema look like? + - **Decision**: Match web UI's marker structure, include all computed stats + +## Success Metrics + +1. Can view marker distribution across a thread in <5 seconds +2. Can identify performance bottlenecks (long markers) in <3 commands +3. Can filter to specific marker types/categories in 1 command +4. Can inspect individual marker details including stack traces +5. Output is readable and actionable for performance analysis + +## Future Enhancements (Post-Launch) + +1. **Marker Comparison**: Compare marker patterns between two time ranges +2. **Marker Correlation**: Find correlations between different marker types +3. **Marker Export**: Export filtered markers to flamegraph format +4. **Marker Diff**: Compare markers between two profiles +5. **Smart Filters**: Pre-defined filters for common analysis tasks (e.g., "long-layout", "slow-network") +6. 
**Interactive Mode**: TUI for browsing markers interactively + +## Implementation Priority + +**Must Have (Phase 1 & 2):** + +- Basic marker listing with aggregation +- Marker type/category filtering +- Individual marker details +- Duration statistics + +**Should Have (Phase 3 & 4):** + +- Smart grouping by fields +- Marker stack traces +- Field-based filtering + +**Nice to Have (Phase 5 & 6):** + +- ASCII timeline/histogram +- Rate analysis +- Network waterfall +- Critical path analysis + +## Timeline Estimate + +- Phase 1 (Basic Listing): 2-3 days +- Phase 2 (Filtering): 1-2 days +- Phase 3 (Grouping): 1-2 days +- Phase 4 (Details): 1-2 days +- Phase 5 (Visualization): 2-3 days +- Phase 6 (Advanced): 2-3 days + +**Total**: ~2 weeks for full implementation +**MVP (Phases 1-2)**: ~1 week diff --git a/package.json b/package.json index d0c60847c9..69816bc59f 100644 --- a/package.json +++ b/package.json @@ -21,6 +21,9 @@ "build-photon": "webpack --config res/photon/webpack.config.js", "build-symbolicator-cli": "yarn build-symbolicator-cli:quiet --progress", "build-symbolicator-cli:quiet": "yarn build:clean && cross-env NODE_ENV=production webpack --config src/symbolicator-cli/webpack.config.js", + "build-profile-query": "yarn build-profile-query:quiet --progress", + "build-profile-query:quiet": "yarn build:clean && cross-env NODE_ENV=production webpack --config src/profile-query/webpack.config.js", + "build-profile-query-cli": "rimraf src/profile-query-cli/dist && mkdirp src/profile-query-cli/dist && cross-env NODE_ENV=production webpack --config src/profile-query-cli/webpack.config.js && chmod +x src/profile-query-cli/dist/pq.js", "lint": "node bin/output-fixing-commands.js run-p lint-js lint-css prettier-run", "lint-fix": "run-p lint-fix-js lint-fix-css prettier-fix", "lint-js": "node bin/output-fixing-commands.js eslint . --report-unused-disable-directives --cache --cache-strategy content", @@ -29,7 +32,7 @@ "lint-fix-css": "yarn lint-css --fix", "prettier-run": "node bin/output-fixing-commands.js prettier --check . --cache --cache-strategy content --cache-location .prettiercache", "prettier-fix": "prettier --write . 
--cache --cache-strategy content --cache-location .prettiercache", - "ts": "tsc", + "ts": "tsc && true", "protoc": "npx -p protobufjs-cli pbjs -t static-module -w commonjs -o ./src/profile-logic/import/proto/simpleperf_report.js ./src/profile-logic/import/proto/simpleperf_report.proto && npx -p protobufjs-cli pbts -o ./src/profile-logic/import/proto/simpleperf_report.d.ts ./src/profile-logic/import/proto/simpleperf_report.js", "license-check": "devtools-license-check", "preinstall": "node bin/pre-install.js", @@ -43,13 +46,14 @@ "start-docs": "ws -d docs-user/ -p 3000", "start-photon": "node res/photon/server", "test": "node bin/output-fixing-commands.js cross-env LC_ALL=C TZ=UTC NODE_ENV=test jest", - "test-all": "run-p --max-parallel 4 ts license-check lint test test-alex test-lockfile", + "test-all": "run-p --max-parallel 4 ts license-check lint test test-alex test-lockfile && yarn test:cli", "test-build-coverage": "yarn test --coverage --coverageReporters=html", "test-serve-coverage": "ws -d coverage/ -p 4343", "test-coverage": "run-s test-build-coverage test-serve-coverage", "test-alex": "alex ./docs-* CODE_OF_CONDUCT.md CONTRIBUTING.md README.md", "test-lockfile": "lockfile-lint --path yarn.lock --allowed-hosts yarn --validate-https", "test-debug": "cross-env LC_ALL=C TZ=UTC NODE_ENV=test node --inspect-brk node_modules/.bin/jest --runInBand", + "test:cli": "yarn build-profile-query-cli && jest --selectProjects=cli", "postinstall": "patch-package" }, "license": "MPL-2.0", diff --git a/pq-case-study-2.md b/pq-case-study-2.md new file mode 100644 index 0000000000..d0055461c8 --- /dev/null +++ b/pq-case-study-2.md @@ -0,0 +1,527 @@ +# pq Case Study 2: Investigating Repeated Rendering Spikes in Firefox + +**Profile:** https://share.firefox.dev/4oLEjCw +**Date:** November 4, 2025 +**Investigator:** Claude (via pq CLI) + +## Executive Summary + +Using pq, I investigated a Firefox performance profile showing repeated GPU rendering spikes. The investigation revealed that the GPU Renderer thread was spending ~27% of spike time in Present operations (DirectComposition/DXGI), triggered by a loop of WM_PAINT messages on the main thread. The main thread would trigger rendering work, wait for the GPU (FlushRendering), and repeat. + +## Investigation Process + +### Initial Exploration + +```bash +pq load 'https://share.firefox.dev/4oLEjCw' +pq profile info +``` + +**Observation:** The profile overview immediately showed the GPU process (p-14) consuming 16.1s of CPU, with the Renderer thread (t-93) at 7.9s being the hottest thread. Multiple CPU spike periods were visible at 160% (2 cores). + +### Deep Dive into GPU Thread + +```bash +pq thread select t-93 +pq thread samples +``` + +**Problem:** The output was **extremely verbose** - over 2000 lines for the full profile view. While comprehensive, it required significant scrolling and cognitive effort to digest. The top functions list showed 50 entries before truncating 2224 more. + +**Finding:** In the full profile: + +- 63.5% of time: Thread idle/waiting +- 36.5% of time: Active rendering work +- 16.4% of active time: DCSwapChain::Present operations +- 20.4% of active time: composite_simple + +### Zooming into Spike Periods + +```bash +pq view push ts-6,ts-7 +pq thread samples | head -n 100 +``` + +**Positive Experience:** After zooming into a specific spike period (391ms), the output became **much more manageable** - only 179 samples vs 14,466 for the full profile. 
The percentages shifted dramatically: + +- 42.5% idle (down from 63.5%) +- 57.5% in UpdateAndRender +- 27.4% in Present operations + +This focused view made it easy to see that during spikes, the thread was spending proportionally more time presenting frames. + +```bash +pq status +pq view pop +``` + +**Positive Experience:** The `status` command clearly showed my current context (selected thread and view range). `view pop` cleanly restored the previous view. + +### Investigating the Trigger + +```bash +pq thread select t-0 +pq thread samples | head -n 80 +``` + +**Finding:** The main thread (GeckoMain) was: + +- 43% idle (waiting for GPU) +- 77% of active time in OnPaint → ProcessPendingUpdates +- Waiting in PCompositorBridge::Msg_FlushRendering + +**Root cause:** A loop of WM_PAINT messages triggering repeated rendering work, with the main thread blocking on GPU completion before proceeding. + +## What Worked Well + +### 1. **Progressive Exploration Model** + +The workflow of `profile info` → `thread select` → `thread samples` → `view push` → drill down worked naturally. Each command provided the context needed for the next step. + +### 2. **Thread Handle System** + +Thread handles like `t-93`, `t-0` were **concise and memorable**. Once I saw "t-93 (Renderer)" in the profile overview, I could directly select it without searching. + +### 3. **Time Range Navigation** + +- **Timestamp names** (ts-6, ts-7, etc.) made it trivial to zoom into spike periods identified in the overview +- **View range stack** (`push`/`pop`) allowed easy exploration without losing context +- `status` command provided clear confirmation of current state + +### 4. **Profile Info Overview** + +The hierarchical CPU activity breakdown was excellent: + +``` +- 81% for 30409.5ms (1865812 samples): [ts-1,ts-z] + - 160% for 390.6ms (27322 samples): [ts-6,ts-7] + - 160% for 255.3ms (18215 samples): [ts-8,ts-9] +``` + +This immediately highlighted where to investigate, with ready-to-use timestamp ranges. + +### 5. **Consistent Command Structure** + +Commands followed predictable patterns: + +- `pq ` (e.g., `thread select`, `view push`) +- Optional flags for refinement (`--thread t-0`) +- Clear, descriptive output + +## What Didn't Work Well + +### 1. **Overwhelming Verbosity in Wide Views** ⚠️ + +**Problem:** `thread samples` output for the full profile was **2000+ lines**. This is cognitively exhausting in a terminal. + +**Impact:** + +- Hard to find actionable information quickly +- Need to pipe through `head` or scroll extensively +- Function list shows "50 entries" but mentions "2224 more omitted" - makes it unclear if I'm missing something important + +**Suggestion:** Add a `--limit N` flag to truncate output: + +```bash +pq thread samples --limit 20 # Show only top 20 functions +``` + +Or make the default output more concise (e.g., top 15-20 functions only, with an explicit "use --verbose for full output" message). + +### 2. **No Function Search/Filter** ❌ + +**Problem:** Once I saw the profile overview, I wanted to search for specific functions (e.g., "how much time in Present?"). Currently, I have to: + +1. Run `thread samples` (2000+ lines) +2. Manually search through output or pipe to `grep` +3. Parse percentages manually + +**Suggestion:** Add function search/filter: + +```bash +pq thread search "Present" +pq thread functions --filter "atidxx64" # Show only AMD driver functions +pq function info "DCSwapChain::Present" # Details about a specific function +``` + +### 3. 
**Call Tree Format is Hard to Parse** + +**Problem:** The ASCII tree is deeply nested and uses UTF-8 box characters: + +``` +└─ └─ └─ └─ └─ └─ └─ └─ └─ └─ └─ └─ └─ └─ └─ ├─ └─ ├─ ... +``` + +After 10+ levels of nesting, it's **visually overwhelming** and hard to follow lineage. + +**Impact:** + +- Difficult to trace execution paths +- Hard to identify "where am I in the stack?" +- The "... (N more children)" truncation breaks flow + +**Suggestion:** + +- Limit tree depth display (show top 5-10 levels by default) +- Add indentation-based format option: + ``` + RenderThread::UpdateAndRender [57.5%] + RendererOGL::UpdateAndRender [54.2%] + wr_renderer_render [48.6%] + Renderer::render [48.6%] + Renderer::draw_frame [43.0%] + composite_frame [35.2%] + composite_simple [35.2%] + PresentImpl [27.4%] + ``` +- Add a `--tree-depth N` flag + +### 4. **No Comparison Between Time Ranges** ❌ + +**Problem:** I identified a spike period (ts-6 to ts-7) where Present was 27.4% of time, vs 16.4% in the full profile. But I had to **manually compare** by running commands twice and noting differences. + +**Suggestion:** Add range comparison: + +```bash +pq view compare ts-6,ts-7 vs ts-8,ts-9 +# Shows side-by-side differences in top functions +``` + +### 5. **No Markers/Events View** ❌ + +**Problem:** The thread info showed "297515 markers" for the main thread, but there's **no way to view them**. Markers often provide critical context (e.g., "Reflow", "Styles", "JavaScript" markers). + +**Suggestion:** Implement marker commands: + +```bash +pq thread markers # List recent markers +pq thread markers --type Reflow # Filter by type +pq marker info # Marker details +``` + +### 6. **Missing Symbol Information is Opaque** 🔶 + +**Problem:** AMD GPU driver functions appear as: + +``` +atidxx64.dll!fun_3e8f0 - total: 2354 (16.3%) +atidxx64.dll!fun_a56960 - self: 598 (4.1%) +``` + +These are **meaningless** for diagnosis. While it's expected that third-party binaries lack symbols, pq provides **no indication** that: + +- These are unsymbolicated +- What type of component this is (GPU driver) +- Whether symbolication was attempted + +**Impact:** Users may think these are real function names rather than placeholder addresses. + +**Suggestion:** + +- Clearly mark unsymbolicated functions: `atidxx64.dll!` +- Group by module in output: "AMD GPU Driver (unsymbolicated): 25% total" +- Add metadata about module types (system library, GPU driver, etc.) + +### 7. **No Aggregated "Waiting Time" View** ⚠️ + +**Problem:** I saw 63.5% of GPU thread time was in `ZwWaitForAlertByThreadId` (waiting), but there's no easy way to see: + +- What the thread is waiting _for_ +- All waiting periods aggregated +- Patterns in wait times + +**Suggestion:** + +```bash +pq thread waits # Show all wait operations +pq thread waits --min-duration 10ms # Filter significant waits +``` + +### 8. **No "Heaviest Stack" or Sample View** ❌ + +**Problem:** The profiler UI shows "heaviest stack" (the single most expensive call stack). This is often the smoking gun. pq only shows aggregated functions and trees. 
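Computing this from the data pq already has would be cheap: each sample contributes its weight to its leaf stack, and the heaviest stack is the leaf with the largest total. A rough sketch, with all type and field names hypothetical:

```typescript
interface SampleLike {
  stackIndex: number; // leaf stack of the sample
  weight: number; // sample weight (e.g. 1 per sample, or a CPU delta)
}

// Returns the leaf stack index whose samples account for the most self time,
// i.e. the "heaviest stack" shown in the profiler UI.
function findHeaviestStack(samples: SampleLike[]): number | null {
  const selfWeightByStack = new Map<number, number>();
  for (const sample of samples) {
    const previous = selfWeightByStack.get(sample.stackIndex) ?? 0;
    selfWeightByStack.set(sample.stackIndex, previous + sample.weight);
  }

  let heaviest: number | null = null;
  let heaviestWeight = 0;
  for (const [stackIndex, weight] of selfWeightByStack) {
    if (weight > heaviestWeight) {
      heaviest = stackIndex;
      heaviestWeight = weight;
    }
  }
  return heaviest;
}
```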
+ +**Suggestion:** + +```bash +pq thread stacks # Show heaviest individual stacks +pq thread stacks --limit 5 # Top 5 heaviest +pq sample info # Details about a specific sample +``` + +## Cognitive Load Assessment + +### Low Cognitive Load ✓ + +- **Progressive disclosure:** Start with overview, drill down as needed +- **Consistent patterns:** Commands are predictable +- **Clear state:** `status` always shows where you are +- **Good naming:** Thread handles (t-93) and timestamp names (ts-6) are intuitive + +### High Cognitive Load ⚠️ + +- **Output volume:** Full profile views are overwhelming (2000+ lines) +- **Manual correlation:** Must compare outputs mentally or with external tools +- **Tree parsing:** Deep call stacks are hard to follow +- **Missing context:** No markers, no sample-level view, no wait analysis + +### Recommendations + +1. **Default to concise output** (top 15-20 items), with `--verbose` for full details +2. **Add summary statistics** at the end of output (e.g., "Top 3 functions account for 45% of time") +3. **Implement filtering** to reduce noise (by function name, module, threshold) +4. **Add comparison commands** to reduce mental arithmetic + +## Output Quality + +### What's Good ✓ + +- **Percentages are clear:** Both absolute (time) and relative (%) shown +- **Hierarchical structure:** Process → Thread → Function breakdowns are logical +- **Time formatting:** Milliseconds for short durations, seconds for long +- **Sample counts:** Shown alongside time, helpful for confidence + +### What's Missing ⚠️ + +- **Context indicators:** No indication when symbols are missing +- **Noise filtering:** Low-impact functions (< 1%) dominate output +- **Actionable guidance:** Output doesn't suggest next steps (e.g., "Focus on these 3 hot functions") +- **Visual hierarchy:** Everything has equal weight in plain text + +### What's Excessive 🔶 + +- **Boilerplate call stacks:** Lines 1-15 of every stack are always the same (RtlUserThreadStart → BaseThreadInitThunk → ...) +- **Truncated function names:** Some C++ template names are cut off mid-word (e.g., `mozilla::interceptor::FuncHook # Obvious +pq profile info # Logical +pq thread select t-93 # Clear +pq thread samples # Descriptive +pq view push ts-6,ts-7 # Intuitive +pq status # Expected +``` + +### Awkward Commands ⚠️ + +- **Piping to head:** `pq thread samples | head -n 100` - shouldn't need shell plumbing for basic limiting +- **Filtering not built-in:** Must use `grep` externally +- **No inline thread selection:** `pq thread samples --thread t-93` doesn't work, must select first + +### Missing Commands ❌ + +```bash +pq thread markers # Not implemented +pq thread waits # Not implemented +pq thread stacks # Not implemented +pq function info # Not implemented +pq view compare # Not implemented +pq thread functions # Not implemented (list top functions only, no tree) +``` + +## Handling of Missing Symbols + +The profile includes AMD GPU driver code (`atidxx64.dll`) with no symbols. pq handled this **functionally** but **poorly for UX**: + +### What Works ✓ + +- Functions are assigned placeholder names (fun_3e8f0) +- Percentages are calculated correctly +- Call stacks show the unsymbolicated frames +- Module name (atidxx64.dll) is preserved + +### What's Broken 🔶 + +- **No indication these are unsymbolicated** - looks like real function names +- **No module-level grouping** - can't easily see "25% in AMD driver" +- **No hints about why** - is this expected? Is symbolication available? 
+- **Addresses are obfuscated** - fun_3e8f0 doesn't show the actual address (0x3e8f0) + +### Impact on Investigation + +Despite missing symbols, I could still: + +- ✓ Identify that GPU driver code was hot (atidxx64.dll functions in top list) +- ✓ See that it was called from D3D11/DXGI Present operations +- ✓ Quantify the time spent (27% in spike periods) + +But I couldn't: + +- ❌ Understand _what_ the driver was doing (memory allocation? rendering? waiting?) +- ❌ Distinguish different driver functions (fun_3e8f0 vs fun_a56960 - which is which?) +- ❌ Know if this is normal or indicates a problem + +### Recommendation + +``` +atidxx64.dll!<0x3e8f0> [unsymbolicated] - total: 2354 (16.3%) + Note: AMD GPU Driver - symbols unavailable + +Or group in output: + GPU Driver Activity (unsymbolicated): 25.4% total + atidxx64.dll!<0x3e8f0>: 16.3% + atidxx64.dll!<0xa56960>: 4.1% + atidxx64.dll!<0xa48860>: 1.6% +``` + +## Performance Profile Summary + +### The Problem + +Firefox was experiencing repeated CPU spikes (160% = 2 cores) every few hundred milliseconds, lasting 200-400ms each. + +### Root Cause + +1. **Main thread:** Continuous WM_PAINT message loop +2. **Main thread:** Triggers rendering via OnPaint → ProcessPendingUpdates +3. **Main thread:** Blocks waiting for GPU (PCompositorBridge::Msg_FlushRendering) +4. **GPU Renderer thread:** Processes frame rendering (WebRender) +5. **GPU Renderer thread:** 27% of spike time spent in DirectComposition Present operations +6. **Repeat:** Pattern repeats every ~300ms + +### Bottleneck + +The GPU Present path (DirectComposition → DXGI → AMD driver) is the bottleneck during spikes. The main thread is waiting for these Present operations to complete before continuing. + +### Likely Issue + +Either: + +- **VSync blocking:** Waiting for monitor refresh before presenting +- **GPU saturation:** AMD driver queueing work faster than GPU can execute +- **Desktop Window Manager contention:** Windows DWM compositing is slow + +## Overall Assessment + +### pq Strengths 💪 + +1. **Progressive exploration** model is natural and effective +2. **Time range navigation** (timestamps + view stack) is excellent +3. **Thread selection** with handles is simple and memorable +4. **Profile overview** immediately surfaces hot spots +5. **Consistent command structure** reduces learning curve + +### pq Weaknesses 😓 + +1. **Output verbosity** makes wide-scope views painful +2. **No filtering or search** forces manual grepping +3. **Missing features:** No markers, no waits, no stacks, no comparison +4. **Poor symbol UX:** Unsymbolicated code looks like real function names +5. **Call tree format** is hard to parse at depth + +### Would I Use pq for Real Investigations? + +**Yes, but with caveats:** + +**For quick triage:** ✓ Excellent - `profile info` + `thread select` + targeted `view push` works great + +**For deep investigation:** ⚠️ Frustrating - need to: + +- Pipe through `head` constantly to manage output +- Keep the profiler UI open for markers, stacks, and visual navigation +- Manually grep for function names +- Copy/paste outputs for comparison + +**pq is currently a "first-look tool"** - great for initial exploration, but you'll switch to the profiler UI for serious debugging. + +## Priority Improvements + +### P0 (Critical for Real Use) + +1. **Add `--limit` flag** to all commands that generate lists +2. **Implement marker viewing** (thread markers is wired up but not functional) +3. 
**Add function search/filter** (`pq thread functions --filter "Present"`) + +### P1 (High Value) + +4. **Improve call tree display** (limit depth, better formatting) +5. **Mark unsymbolicated functions clearly** +6. **Add module-level grouping** for unsymbolicated code + +### P2 (Nice to Have) + +7. **Add stack/sample viewing** (heaviest stacks) +8. **Add wait analysis** (thread waits) +9. **Add comparison** (view compare) +10. **Add inline thread selection** (`--thread` flag on all commands) + +## Comparison with Case Study 1 + +Both case studies investigated **the same profile** (https://share.firefox.dev/4oLEjCw) and reached remarkably **consistent conclusions**, validating the findings: + +### Identical Core Issues ✓ + +1. **Missing library/module context** - Both flagged this as the #1 critical problem +2. **Excessive output truncation** - Call trees, function lists, heaviest stacks all cut off too early +3. **Output verbosity** - Full profile views are overwhelming +4. **Missing marker support** - Identified as a major gap +5. **Same performance diagnosis** - Both found GPU rendering with repeated Present operations + +### Converging Recommendations ✓ + +Both case studies independently proposed: + +- **Time range format flexibility** - Support seconds (2.7,3.1) not just timestamp names +- **Function search/filtering** - Need to find specific functions +- **Deeper output limits** - Show more functions, more tree depth, more frames +- **Status command** - Show current context (thread, range, session) +- **Separate sample commands** - Split `thread samples` into focused views + +### Key Disagreements 🤔 + +**Function Handles:** + +- **Case Study 1 proposed:** Function handles like `f-234` for brevity +- **Case Study 2 (this):** Rejected handles as cognitive overhead; prefer smart truncation + +**Analysis:** I agree with Case Study 2 (my own conclusion). Function handles add indirection ("what was f-234 again?") and break copy/paste workflows. Smart truncation achieves the same brevity without the cognitive tax. + +**Command naming:** + +- **Case Study 1:** `view push-range` (explicit) +- **Case Study 2:** `view push` (concise) + +**Analysis:** Both work. I slightly prefer `view push` for brevity, but consistency with other `push-X` commands could justify `push-range`. Not a strong opinion. + +### Unique Insights + +**From Case Study 1:** + +- Detailed design recommendations section (§1-§9) +- Proposed named ranges (`spike:1`, `longest-frame`) +- Identified negative nanosecond timestamp bug in `view pop` +- Provided implementation timeline estimates (2-10 weeks to production ready) + +**From Case Study 2 (this):** + +- Cognitive load assessment framework (low/high cognitive load categories) +- "First-look tool" vs "primary investigation tool" distinction +- More focus on real-world workflow pain points +- Specific call-out of AMD driver symbol handling +- Emphasis on filtering as a solution to verbosity + +### Validation + +The **high degree of overlap** between independent investigations of the same profile demonstrates: + +1. ✅ The issues are real and reproducible +2. ✅ The proposed solutions are well-aligned +3. ✅ The priority rankings are consistent (module names > truncation > markers) +4. ✅ The overall assessment is reliable ("promising foundation with critical gaps") + +## Conclusion + +pq is a **promising tool** that successfully enables command-line profile investigation. 
The core workflow is solid, and for focused investigations (zoomed into specific time ranges), it's quite effective. + +However, **output verbosity and missing features** significantly limit its utility for complex investigations. Adding filtering, limiting, and marker viewing would transform pq from a "triage tool" into a "primary investigation tool." + +The handling of unsymbolicated code is **functional but needs UX work** - it's not a blocker, but better clarity would help users understand what they're looking at. + +**Bottom line:** pq has excellent bones, but needs refinement to handle the scale and complexity of real-world performance profiles. + +**Cross-validation with Case Study 1:** The independent investigation reached nearly identical conclusions, confirming these findings are robust and actionable. diff --git a/pq-case-study.md b/pq-case-study.md new file mode 100644 index 0000000000..eb648991a3 --- /dev/null +++ b/pq-case-study.md @@ -0,0 +1,718 @@ +# Profile Query CLI (pq) - Case Study Report + +## Profile Investigation Summary + +**Profile:** https://share.firefox.dev/4oLEjCw (Firefox 146 on Windows 11) + +### Findings + +The profile shows **bursty rendering activity** rather than sustained performance issues. Key observations: + +1. **Thread CPU Distribution:** + - GPU process (p-14): 16.1s total CPU time + - Renderer thread (t-93): 7.9s (26% active, 63.5% idle overall) + - WRWorker threads: ~1.5s each (88-95% idle) + - Parent Process GeckoMain (t-0): 7.9s (42% waiting, 26% sleeping) + +2. **Activity Pattern:** + - Baseline: 81% CPU utilization across ~30 seconds + - Spikes: 160% CPU (2 cores saturated) in bursts of 200-1000ms + - Most threads spend majority of time idle waiting for work + +3. **Active Work Breakdown:** + - GPU Renderer: 7.5% checking device state (`WaitForFrameGPUQuery`), ~9% waiting on GPU operations + - Main thread: Blocked on IPC (`SendFlushRendering`), waiting for compositor responses + - WRWorker threads: Skia rendering (`SkRasterPipelineBlitter`, path operations) + - **Limited visibility:** ~15% of time in unsymbolicated GPU driver functions (`fun_a56960`, etc.) - can't determine which library without module names + +**Diagnosis:** This is not a "slow" profile - it's a profile of normal responsive rendering with expected idle time. The system waits appropriately between frames and for GPU operations to complete. No obvious bottleneck identified, though GPU driver work (which accounts for a significant portion of time) cannot be fully characterized without library/module context. + +--- + +## pq Usability Evaluation + +### What Worked Well + +**1. Fast Profile Loading** +Loading a remote profile from share.firefox.dev was smooth and quick. The daemon model works well. + +**2. Progressive Exploration** +The workflow of `profile info` → `thread select` → `thread samples` felt natural for drilling down into threads. + +**3. View Range Zooming** +`view push-range ts-6,ts-7` successfully filtered to spike periods. The concept of pushing/popping ranges is solid. + +**4. Command Consistency** +Commands follow predictable patterns: `thread select`, `thread info`, `thread samples`. Easy to remember. + +**5. Output Quality - Thread Info** +The CPU activity timeline with indented percentages is excellent: + +``` +- 26% for 30404.1ms (14464 samples): [ts-2,ts-Yz] + - 40% for 4829.3ms (2238 samples): [ts-5,ts-Fa] + - 60% for 161.3ms (73 samples): [ts-FX,ts-FY] +``` + +This nested view clearly shows when and where CPU spikes occur. 
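+
+To make the "where are the spikes?" question concrete, here is a minimal TypeScript sketch of how such high-CPU ranges could be derived from a per-thread CPU-ratio time series (the firefox-profiler code base has a related `CpuRatioTimeSeries` representation in `src/profile-logic/combined-cpu.ts`). This is an illustration under assumed types, not pq's actual implementation; the `CpuPoint` shape, the `findHotSpans` name, and the thresholds are invented for the example.
+
+```typescript
+// Sketch only: find contiguous spans where CPU utilization stays above a threshold.
+type CpuPoint = { time: number; cpuRatio: number }; // time in ms; cpuRatio 1.0 = one full core
+type HotSpan = { start: number; end: number; avgRatio: number };
+
+function findHotSpans(
+  series: CpuPoint[],
+  minRatio: number, // e.g. 0.8 for "at least 80% of one core"
+  minDurationMs: number // ignore spans shorter than this
+): HotSpan[] {
+  const spans: HotSpan[] = [];
+  let spanStart = -1;
+  let ratioSum = 0;
+  let count = 0;
+
+  // Close the currently open span (if any) at `end`, keeping it only if long enough.
+  const closeSpan = (end: number) => {
+    if (spanStart !== -1 && count > 0 && end - spanStart >= minDurationMs) {
+      spans.push({ start: spanStart, end, avgRatio: ratioSum / count });
+    }
+    spanStart = -1;
+    ratioSum = 0;
+    count = 0;
+  };
+
+  for (const { time, cpuRatio } of series) {
+    if (cpuRatio >= minRatio) {
+      if (spanStart === -1) {
+        spanStart = time; // a new hot span begins here
+      }
+      ratioSum += cpuRatio;
+      count++;
+    } else {
+      closeSpan(time); // utilization dropped below the threshold
+    }
+  }
+  if (series.length > 0) {
+    closeSpan(series[series.length - 1].time); // close a span that runs to the end
+  }
+  return spans;
+}
+```
+
+The nested breakdown shown above could then be approximated by re-running such a scan with progressively higher thresholds inside each detected span.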
+ +--- + +### Critical Issues + +**1. Missing Library/Module Names** +The single biggest problem. Output shows bare function names without context: + +- `fun_a56960`, `fun_a48860`, `fun_1159e6` - which library are these from? +- `0x7ffdbb3c8055`, `0x13c9bd2dcf1` - what module contains these addresses? + +**Context:** Graphics drivers often don't provide symbol information, so function names like `fun_a56960` are expected. However, the web UI shows **which library** the function is in (e.g., `nvoglv64.dll`, `amdvlk64.dll`, `d3d11.dll`), which is crucial for diagnosis. + +**Impact:** Cannot tell if time is spent in: + +- GPU driver code (expected for rendering) +- System libraries (might indicate OS contention) +- Unknown/JIT code (might indicate JavaScript or corrupted stacks) +- Third-party DLLs (might indicate extension issues) + +Even without function symbols, knowing "14% of time in GPU driver" vs "14% in unknown code" is the difference between actionable insight and confusion. + +**Needed:** + +- Show library/module names for all functions: `nvoglv64.dll!fun_a56960` +- Group by module in top functions: "15% in nvoglv64.dll (GPU driver)" +- Annotate unknown addresses with their module when available +- Special handling for JIT code addresses (mark as "JS JIT" if from SpiderMonkey heap) + +**2. Truncated Call Trees** +Regular call trees are cut off early, showing only 10 levels before "..." when there are clearly more levels. Example: + +``` +└─ └─ └─ └─ └─ └─ └─ └─ └─ └─ MessageLoop::Run() [total: 100.0%, self: 0.0%] +``` + +Then it just stops, even though there's clearly more interesting work below. + +**Impact:** Cannot see the actual work being done, only the dispatch machinery. + +**Needed:** + +- Show more levels by default (at least 20-30) +- Add a parameter to control depth: `--max-depth=50` or `--full-tree` +- Smart truncation: continue showing branches with >5% self time + +**3. Timestamp Display Issues** +After `view pop-range`, timestamps were shown as: + +``` +ts<78 (-3,703,142,204,026ns) to ts<79 (-3,702,751,569,159ns) +``` + +**Problems:** + +- Negative nanosecond values are meaningless to users +- Should show relative times like "2.701s to 3.092s" as push-range did +- Inconsistent between push and pop + +**4. Limited Function List** +"Top Functions" shows only 20 functions, then: + +``` +... (6603 more functions omitted, max total: 16392, max self: 6916, sum of self: 2818) +``` + +**Impact:** Cannot see secondary bottlenecks. If top function is 42% waiting (expected), I need to see what the other 58% is doing. + +**Needed:** + +- Show at least top 50 functions +- Add `--limit=N` parameter +- Better filtering: `--min-self-time=1%` to hide trivial functions + +**5. Heaviest Stack Truncation** +The heaviest stack shows "... (42 frames skipped)" in the middle: + +``` + 20. NS_ProcessNextEvent(nsIThread*, bool) + ... (42 frames skipped) + 63. nsWindow::WindowProcInternal(...) +``` + +**Impact:** Cannot see the full execution path. The skipped frames are often the most important. + +**Needed:** Never skip frames in "heaviest stack" - it's only one stack, show all frames. + +--- + +### Missing Features + +**1. Marker Support** +Commands exist in help text (`thread markers`) but aren't implemented. Markers are crucial for understanding: + +- Layout/style/reflow costs +- JavaScript function names +- IPC message types +- GPU command boundaries + +**Impact:** Major gap. Half of profiling insight comes from markers. + +**2. 
Time Range Selection by Content** +No way to find "ranges where thread X is >80% active" or "show me the longest frame". Currently must: + +- Read profile info manually +- Copy timestamp names +- Push range manually + +**Needed:** + +- `view find-spikes --thread t-93 --min-cpu=80%` +- `view find-longest-frame` +- `view show-frame 42` (jump to Nth frame) + +**3. Cross-Thread Analysis** +No way to see what multiple threads were doing during the same time range. Had to manually: + +- Push range for spike period +- Select thread, view samples +- Select another thread, view samples +- Mentally correlate + +**Needed:** + +- `thread compare t-0 t-93` showing both threads side-by-side +- `profile samples --all-threads` during current view range + +**4. Function Listing/Search** +No way to search for specific functions. Wanted to find all places where `nsTreeImageListener::AddCell` appears (saw it used 0.3% CPU), but had to scroll through output. + +**Needed:** + +- `thread functions` to list all functions with CPU time +- `thread functions -E "nsTree"` to filter with regex (see Design Recommendations §6) + +**5. JavaScript-Specific Commands** +No way to view just JavaScript execution: + +- Filter to JIT frames vs C++ frames +- See hot JavaScript functions +- Understand script URLs + +**6. Export/Save** +No way to save investigation results. Had to pipe to `head` manually. Would want: + +- `thread samples --output=report.txt` +- `profile export --format=json` for scripting + +--- + +### Cognitive Load Assessment + +**Learning Curve: Low ✓** + +- Commands are intuitive if you understand profiling concepts +- Help text is clear +- Predictable command structure + +**Mental Model: Good ✓** + +- Daemon/client separation is invisible (good) +- Thread selection persists across commands (good) +- View range stack metaphor is clear + +**Context Switching: Moderate** + +- Remembering timestamp names (ts-6, ts-7) is awkward +- Have to remember which thread is selected +- No way to see "current state" - need `pq status` showing: + - Current session + - Selected thread + - Current view range stack + +**Memory Burden: High** + +- Timestamp names are opaque (ts-6 vs ts-FX) +- Must remember findings from previous commands +- No way to annotate or save intermediate results + +--- + +### Output Quality + +**Profile Info: Excellent ✓✓✓** + +- Clear hierarchy (processes → threads) +- CPU percentages make relative costs obvious +- Timeline sections show burst patterns +- Top threads immediately visible + +**Thread Info: Excellent ✓✓✓** + +- Nested CPU activity is perfect for finding spikes +- Sample counts + durations both shown +- Thread lifecycle (created/ended) useful + +**Thread Samples: Good but Limited** + +- Top functions by total/self time is standard profiler output +- Inverted call tree is useful +- Heaviest stack helps identify primary path + +**Problems:** + +- Too much truncation (as detailed above) +- No percentage filter (hide <1% functions) +- Call tree depth insufficient +- Missing symbols are jarring + +--- + +### Ergonomics + +**Command Length: Mixed** + +- Short commands are nice: `thread info`, `profile info` +- Thread handles work well: `t-93` is concise +- Timestamp ranges are verbose: `view push-range ts-6,ts-7` + - **Addressed in Design Recommendations §2**: support `view push 2.7,3.1` (seconds) + +**Discoverability: Good ✓** + +- `--help` shows all commands +- Error messages are clear +- Command structure is guessable + +**Error Recovery: Needs Work** + +- No undo for thread selection (minor) 
+- Can't peek at view range without pushing +- No validation of timestamp names before pushing + +**Workflow Efficiency:** + +- Too many steps to compare threads during a spike +- No way to iterate quickly through interesting ranges +- Must manually correlate information across commands + +--- + +### Design Recommendations + +This section addresses key design questions that arose during the case study. + +#### 1. Timestamp Display: Always Show Both + +**Current issue:** Compact names (ts-6) are opaque; long timestamps are hard to remember. + +**Recommendation:** Show both everywhere: + +``` +Pushed view range: ts-6 (2.701s) to ts-7 (3.092s) +Popped view range: ts-6 (2.701s) to ts-7 (3.092s) +``` + +**Benefits:** + +- Compact names for scripting/reference: `view push ts-6,ts-7` +- Human-readable context for understanding +- Consistency between push and pop + +#### 2. Timestamp Range Input: Support Multiple Formats + +**Current issue:** "ts-6,ts-7" is verbose - requires copying from profile info output. + +**Recommendation:** Accept multiple formats, parse intelligently: + +```bash +view push ts-6,ts-7 # Timestamp names (current) +view push 2.7,3.1 # Relative seconds (new, most ergonomic) +view push 2.7s,3.1s # Explicit unit (new) +view push 2700ms,3100ms # Milliseconds (new) +view push 10%,20% # Percentage through profile (new) +``` + +**Default unit:** Seconds (most natural) + +**Benefits:** + +- Fast iteration: `view push 2.7,3.1` is much shorter than `view push ts-6,ts-7` +- Intuitive: "zoom into 2.7 to 3.1 seconds" is clear +- Backward compatible: timestamp names still work +- Scriptable: can compute times programmatically + +**Implementation note:** Detect format by pattern - if contains `ts-`, use name lookup; if numeric, parse as time; if contains `%`, parse as percentage. + +#### 3. Separate Commands for Sample Views + +**Current issue:** `thread samples` dumps everything. Flags like `--limit` or `--min-self-time` would apply to all sections, which is awkward. + +**Recommendation:** Split into focused commands: + +```bash +thread samples-top [--limit=N] [--min-self=1%] [--by={total|self}] + # Just top functions by total/self time + +thread samples-tree [--max-depth=N] [--min-percentage=1%] + # Just regular call tree + +thread samples-inverted [--max-depth=N] [--min-percentage=1%] + # Just inverted call tree + +thread samples-heaviest [--no-skip | --max-frames=N] + # Just heaviest stack + +thread samples # Keep for backward compatibility + # All views (current behavior) +``` + +**Benefits:** + +- Each view has appropriate parameters +- Faster output when you only need one view +- More composable with shell tools (`| grep`, `| less`) +- Can set sensible per-view defaults + +**Alternative considered:** `thread samples --view=top --limit=50` + +- Rejected: less ergonomic, harder to discover views + +#### 4. Heaviest Stack Truncation: Increase Cap with Safety Limit + +**Current issue:** 27 frames shown, 42 skipped - way too aggressive. + +**Recommendation:** + +- Default: Show up to **200 frames** (covers 99% of real stacks) +- Safety: Cap at **500 frames** to prevent terminal flooding from infinite recursion +- Flag: `--max-frames=N` to override +- Never skip in the middle - if truncated, show first N frames with clear message: + ``` + ... (300 more frames omitted - use --max-frames to see all) + ``` + +**Rationale:** + +- 200 frames handles even deep template/async stacks +- 500 frame safety net catches bugs +- Skipping frames in the middle destroys diagnostic value + +#### 5. 
Function Names: Smart Truncation, No Handles + +**Issue:** Long C++ names with templates are verbose and hard to scan. + +**Recommendation: Smart truncation without handles** + +Function handles (`f-234`) add cognitive overhead and indirection. Instead: + +**Length cap: 100 characters** with smart truncation: + +``` +# Original (150 chars): +std::_Hash,std::equal_to>,std::allocator>,0>>::~_Hash() + +# Truncated (100 chars): +std::_Hash>::~_Hash() +``` + +**Rules:** + +1. Keep module/library name: `nvoglv64.dll!` always shown +2. Keep actual function name: `~_Hash()` always shown +3. Truncate middle of namespaces/templates: `...` +4. Preserve enough to be unique in context + +**For call trees:** Even more aggressive (60 char limit) since indentation eats space: + +``` +mozilla::wr::RenderThread::UpdateAndRender(...) +``` + +**Benefits:** + +- No cognitive overhead of handle indirection +- Still readable at a glance +- No need for separate lookup command +- Can copy/paste into search + +**Alternative considered:** Function handles like `f-234` + +- Rejected: requires mental mapping, breaks copy/paste, adds complexity + +#### 6. Function Search: Use `thread functions` with Grep Patterns + +**Current issue:** No way to search for functions. + +**Recommendation:** + +```bash +thread functions # List all functions with CPU time +thread functions -E "nsTree" # Regex filter (like grep -E) +thread functions -i "layout" # Case-insensitive (like grep -i) +thread functions --min-self=1% # Only functions with >1% self time +``` + +**Output format:** + +``` +Functions in thread t-0 (GeckoMain): + 42.2% ZwUserMsgWaitForMultipleObjectsEx + 26.4% ZwWaitForAlertByThreadId + 8.1% NtUserMessageCall + 1.3% memset + ... (showing 45 of 6623 functions) +``` + +**Benefits:** + +- Familiar grep-style interface +- Composable: can still pipe to grep for more filtering +- Consistent with ripgrep conventions + +**Name:** `thread functions` (not `thread search`) because it's listing/filtering functions, not searching arbitrary text. + +#### 7. Command Structure: Clarify State vs Time Range + +**Current issue:** Inconsistency between `thread select` and `view push-range` - both change "view state" but use different command prefixes. + +**Recommendation: Separate concerns clearly** + +Two types of state: + +1. **Thread selection** - which thread to analyze +2. **Time range** - which time window to analyze + +**Proposed structure:** + +```bash +# Thread selection +thread select t-93 # Select thread +thread info # Info for selected thread +thread samples-top # Samples for selected thread + +# Time range (keep "view" for time, since it's the "view" into the timeline) +view push ts-6,ts-7 # Push time range +view pop # Pop time range +view clear # Clear all ranges (back to full profile) +view list # Show range stack + +# Status (what's my current context?) 
+status # Show session, selected thread, range stack + # Output: + # Session: ttzltpqjsi (profile: https://share.firefox.dev/4oLEjCw) + # Thread: t-93 (Renderer) + # View ranges: [ts-6 (2.701s) → ts-7 (3.092s)] +``` + +**Alternative considered:** `time-range push` instead of `view push` + +- Rejected: "view" is shorter, intuitive (you're changing your view of the timeline) +- "time-range" is verbose and awkward + +**Alternative considered:** `view` command shows status + +- Rejected: `view push/pop/clear` makes `view` ambiguous (verb vs noun) +- Better to have explicit `status` command + +**Benefits:** + +- Clear separation: `thread` = which thread, `view` = which time +- Consistent: all state changes are explicit commands +- `status` shows everything at once + +#### 8. Function Name Repetition: Acceptable with Module Context + +**Issue:** Function names appear many times in call tree output. + +**Analysis:** This is actually fine and expected: + +- Call trees inherently repeat names (parent nodes) +- Module prefixes (`nvoglv64.dll!`) add context, not noise +- Truncation (rule #5) keeps length manageable +- Terminal scrollback handles repetition well + +**No action needed.** The proposed module display and truncation rules are sufficient. + +#### 9. View Range Ergonomics: Range Names for Common Patterns + +**Additional idea:** For common access patterns, support named ranges: + +```bash +view push spike:1 # First detected CPU spike >80% +view push spike:next # Next spike after current range +view push frame:5 # 5th vsync frame (if markers present) +view push longest-frame # Longest frame in profile +``` + +**Implementation:** These would be computed on-demand, not persisted. + +**Benefits:** + +- Very fast exploration: "show me the spikes" +- No need to manually parse profile info output +- Great for CI/CD: "report on longest frame" + +**Priority:** Medium (do after basic time format support) + +--- + +### Specific Improvements Needed + +See **Design Recommendations** section above for detailed proposals on command structure, timestamp formats, and function display. + +**High Priority:** + +1. **Show library/module names** - essential context even without symbols (Design Rec. §1) +2. **Fix timestamp display** - show both compact name and readable time (Design Rec. §1) +3. **Support time formats** - accept seconds, ms, % in addition to timestamp names (Design Rec. §2) +4. **Separate sample commands** - `thread samples-top`, `samples-tree`, etc. (Design Rec. §3) +5. **Deeper call trees** - show 30+ levels by default, cap at 200 for call trees +6. **Fix heaviest stack truncation** - show up to 200 frames, never skip middle (Design Rec. §4) +7. **Implement markers** - huge gap in functionality + +**Medium Priority:** + +8. **Status command** - show session/thread/range state (Design Rec. §7) +9. **Function listing** - `thread functions -E "pattern"` (Design Rec. §6) +10. **Smart function truncation** - 100 char cap, preserve module + function name (Design Rec. §5) +11. **Cross-thread views** - compare threads during same range +12. **Named ranges** - `view push spike:1`, `longest-frame` (Design Rec. §9) + +**Low Priority:** + +13. **Export results** - save to file +14. **Progress indicators** - loading large profiles +15. 
**Color output** - highlight high percentages in output + +--- + +### Comparison to Web UI + +**pq Advantages:** + +- Much faster for quick triage +- Easy to script/automate +- Lower memory usage +- Works over SSH +- Can process many profiles in batch + +**Web UI Advantages:** + +- Visual timeline shows everything at once +- Mouse hover reveals details instantly +- Can see multiple threads simultaneously +- Marker tooltips show rich information +- Source view integration +- Network panel, memory tracks, etc. + +**Ideal Use Cases for pq:** + +- Quick "what's slow?" triage +- CI/CD performance monitoring +- Batch analysis of many profiles +- Server-side investigation (no GUI) +- Extracting specific data for reports + +**Where pq Falls Short:** + +- Complex multi-thread timing issues +- Understanding frame scheduling +- Correlating markers with samples +- Visual pattern recognition +- Exploratory analysis without hypothesis + +--- + +### Overall Assessment + +**Current State: Promising Foundation (60% there)** + +pq successfully demonstrates that CLI profiling is viable and valuable. The core architecture (daemon model, thread selection, view ranges) is sound. For profiles where you can identify which libraries are consuming time and single-threaded bottlenecks, it works reasonably well. + +**Critical Gaps:** + +- Library/module context is essential - without it, functions are unidentifiable blobs +- Output truncation hides too much information +- Missing marker support eliminates half of profiling value + +**Recommendation:** +Fix the three critical gaps above before adding new features. A tool that shows incomplete information (truncated trees, missing module context, no markers) frustrates users more than missing features. + +**Potential:** +If library names, depth, and markers are addressed, pq could become the standard first-response tool for performance issues. "Run pq first, open web UI if needed" would be a great workflow. + +**Estimated to "Production Ready":** + +- With critical fixes: 2-3 weeks +- With medium priority features: 4-6 weeks +- With low priority polish: 8-10 weeks + +The foundation is solid. The gaps are addressable. The value proposition is clear. + +--- + +### Summary of Key Design Decisions + +Based on the case study investigation, here are the recommended design directions: + +**1. Command Structure** (Design Rec. §7) + +```bash +thread select t-93 # Select which thread +thread samples-top # View top functions (separate commands per view) +view push 2.7,3.1 # Push time range (view = time window) +status # Show current state +``` + +- `thread` for thread operations, `view` for time ranges, `status` for context +- No function handles (f-234) - use smart truncation instead + +**2. Time Range Input** (Design Rec. §2) + +```bash +view push ts-6,ts-7 # Timestamp names (keep for compatibility) +view push 2.7,3.1 # Seconds (NEW, default - most ergonomic) +view push 2700ms,3100ms # Milliseconds (NEW) +view push 10%,20% # Percentage (NEW) +``` + +- Always display both: "ts-6 (2.701s)" in output + +**3. Sample View Commands** (Design Rec. §3) + +- Separate commands: `samples-top`, `samples-tree`, `samples-inverted`, `samples-heaviest` +- Each has appropriate flags: `--limit`, `--max-depth`, `--min-self`, etc. +- Keep `thread samples` for backward compatibility (shows all) + +**4. Function Display** (Design Rec. 
§5) + +- Show module names: `nvoglv64.dll!fun_a56960` +- Smart truncation: 100 chars max, preserve module + function name +- Call trees: 60 chars (indentation eats space) +- No function handles + +**5. Output Limits** + +- Top functions: Show 50 by default (was 20) +- Call tree depth: Show 30+ levels by default (was ~10) +- Heaviest stack: Show 200 frames (was 27), never skip middle +- Safety cap: 500 frames max to catch infinite recursion + +**6. Function Search** (Design Rec. §6) + +```bash +thread functions # List all with CPU% +thread functions -E "nsTree" # Regex filter (grep-style) +``` + +**7. Status/Context** + +```bash +status # Show session, selected thread, view range stack +``` + +These decisions prioritize: + +- **Ergonomics**: `view push 2.7,3.1` is much faster than `view push ts-6,ts-7` +- **Consistency**: Clear separation between `thread` (which) and `view` (when) +- **Readability**: Module names and smart truncation over handles +- **Composability**: Separate commands work better with pipes/scripts +- **Discoverability**: Grep-style flags, clear command names diff --git a/pq-filters-and-bookmarks-proposal.md b/pq-filters-and-bookmarks-proposal.md new file mode 100644 index 0000000000..de3fb18408 --- /dev/null +++ b/pq-filters-and-bookmarks-proposal.md @@ -0,0 +1,750 @@ +# pq Filters and Bookmarks Proposal + +**Status:** Proposal (revised based on feedback) +**Created:** 2025-01-04 +**Last Updated:** 2025-01-04 +**Related:** pq-todo.md + +--- + +## Key Design Decisions + +Based on feedback, this proposal includes: + +1. ✅ **Consistent terminology** - `push/pop/clear` for zoom and filters, `load/unload` for bookmarks +2. ✅ **Multi-thread selection** - `thread select t-0,t-93` works for sticky state +3. ✅ **Clear OR vs AND** - `-any` suffix for OR, repeated flags for AND +4. ✅ **Per-profile bookmarks** - scoped to current profile, not global +5. ✅ **Zoom validation** - nested ranges must be contained within parent +6. ✅ **Unified naming** - `--includes-prefix` instead of `--starts-with-function`/`--starts-with-sequence` +7. ✅ **Balanced zoom syntax** - `zoom push ts-6,ts-7` and `zoom push --marker m-158` +8. ✅ **Single filter stack** - one ordered stack for sample/stack filters (order matters for dependencies) +9. ✅ **Prefix means exact sequence** - `--includes-prefix A,B,C` means starts with A→B→C exactly +10. ✅ **Per-thread filter stacks** - each thread has its own filter context (handles are thread-specific!) +11. ✅ **Separate marker filters** - marker display filtering independent from sample/stack filtering + +--- + +## Overview + +This proposal defines a comprehensive system for managing analysis state in pq with **four independent dimensions**: + +1. **Thread selection** (global) - which thread(s) you're analyzing +2. **Zoom** (global) - time range you're focused on +3. **Sample/Stack filters** (per-thread) - how to filter and transform samples +4. **Marker filters** (per-thread) - how to filter marker display + +Each dimension supports: + +- **Ephemeral use** - apply once via flags +- **Sticky state** - persists across commands via push/pop/clear +- **Bookmarks** - save and restore complex views + +--- + +## Design Principles + +1. **Ephemeral by default** - All commands accept filter flags that apply only to that invocation +2. **Explicit stickiness** - Making state sticky requires explicit commands (`select`, `zoom push`, `filter add`) +3. **Clear state** - `pq status` always shows current thread, zoom, and active filters +4. 
**Composable** - Filters, zoom, and thread selection are independent dimensions +5. **Saveable** - Complex views can be bookmarked and recalled + +--- + +## Core Syntax + +### 1. Ephemeral Filters (Flags) + +All commands accept filter flags that apply only to that command: + +```bash +# Ephemeral thread selection +pq thread samples --thread t-93 +pq thread markers --threads t-0,t-93 + +# Ephemeral zoom +pq thread samples --zoom ts-6,ts-7 +pq thread markers --zoom m-158 + +# Ephemeral sample filters +pq thread samples --includes-any-function PresentImpl,FlushD3D11 +pq thread samples --during-marker --search Paint +pq thread samples --includes-prefix f-1,f-2,f-3 + +# Ephemeral stack transforms +pq thread samples --merge malloc,free,arena_dalloc +pq thread samples --root-at f-142 +pq thread samples --strip-prefix f-1,f-2,f-3 + +# Combinations work +pq thread samples --thread t-93 --zoom ts-6,ts-7 --merge malloc --limit 20 +``` + +### 2. Sticky Thread Selection + +```bash +pq thread select t-93 # Select single thread (sticky) +pq thread samples # Uses t-93 + +pq thread select t-0,t-93 # Select multiple threads (sticky) +pq thread samples # Uses both threads + +pq thread select t-0 # Switch to different thread +``` + +### 3. Sticky Zoom (Stack-based) + +```bash +pq zoom push ts-6,ts-7 # Push zoom level +pq thread samples # Uses zoomed range + +pq zoom push ts-6a,ts-6c # Zoom further (within previous range) +pq thread samples # Uses nested zoom + +pq zoom pop # Pop one zoom level (back to ts-6,ts-7) +pq zoom pop # Pop again (back to full profile) + +pq zoom clear # Clear entire zoom stack + +# Convenience forms +pq zoom push m-158 # Zoom to marker's time range (✅ implemented) +pq zoom push --spike 1 # Zoom to first CPU spike (future feature) +``` + +### 4. Sticky Sample/Stack Filters (Per-Thread) + +Each thread has its own filter stack. Filters are **scoped to the current thread** because function handles (f-142) are thread-specific. + +```bash +# Select thread first +pq thread select t-93 + +# Push filters onto THIS THREAD's stack +# Each filter is applied in push order + +# Sample filters (which samples to include) +pq filter push --includes-any-function f-142,f-143 # Filter 1: includes f-142 OR f-143 +pq filter push --includes-function f-142 # Filter 2: AND (must also have f-142) +pq filter push --includes-function f-200 # Filter 3: AND (must also have f-200) + +pq filter push --during-marker --search Paint # Filter 4: during Paint markers +pq filter push --includes-prefix f-100,f-200 # Filter 5: stack starts f-100→f-200 +pq filter push --includes-suffix f-142 # Filter 6: stack ends with f-142 + +# Inverse sample filters (exclude) +pq filter push --excludes-any-function malloc,free # Exclude samples +pq filter push --outside-marker --search GC + +# Stack transform filters (modify stacks before sample filtering) +pq filter push --strip-prefix f-1,f-2,f-3 # Strip then sample-filter +pq filter push --merge malloc,free,arena_dalloc # Merge away allocators +pq filter push --merge-regex "^(malloc|free|moz_x)" +pq filter push --root-at f-142 # Re-root stacks +pq filter push --strip-suffix f-999 + +# Order matters! Example: +pq filter push --strip-prefix A,B,C # 1. Strip A→B→C from stacks +pq filter push --includes-prefix D # 2. 
Then filter by prefix D +# Filter 2 sees stacks AFTER filter 1 has transformed them + +# Management +pq filter list # Show filters for current thread +pq filter pop # Pop most recent filter +pq filter pop 3 # Pop 3 most recent filters +pq filter clear # Clear all filters for current thread + +# Switch threads - different filter stack! +pq thread select t-0 +pq filter list # t-0's filters (independent from t-93) + +# Filters apply to sample analysis commands +pq thread samples # Uses current thread's filters +``` + +### 5. Sticky Marker Filters (Per-Thread) + +Each thread also has its own marker filter stack, independent from sample/stack filters. + +```bash +# Select thread +pq thread select t-0 + +# Push marker display filters onto THIS THREAD's marker filter stack +pq marker filter push --search Paint +pq marker filter push --category Graphics +pq marker filter push --min-duration 5 +pq marker filter push --max-duration 100 +pq marker filter push --has-stack + +# Marker filters affect marker display +pq thread markers # Shows only filtered markers + +# Management +pq marker filter list # Show marker filters for current thread +pq marker filter pop # Pop most recent marker filter +pq marker filter clear # Clear all marker filters for current thread + +# Switch threads - different marker filter stack! +pq thread select t-93 +pq marker filter list # t-93's marker filters (independent from t-0) + +# Marker filters only affect marker display, not sample analysis +pq thread samples # NOT affected by marker filters +pq thread markers # Affected by marker filters +``` + +**Note:** Sample/stack filters that reference markers (e.g., `--during-marker --search Paint`) operate on the **unfiltered** marker set. Marker filters only affect display, not sample filtering logic. + +### 6. Bookmarks + +Bookmarks save complex views for later recall. They are **per-profile** (scoped to the current session's profile). + +```bash +# Create bookmarks +pq bookmark view spike1 --zoom ts-6,ts-7 --threads t-0,t-93 +pq bookmark filter no-allocators --merge malloc,free,arena_dalloc + +# Use bookmarks ephemerally +pq thread samples --view spike1 +pq thread samples --filter no-allocators +pq thread samples --view spike1 --filter no-allocators + +# Load bookmarks (make sticky) +pq bookmark load view spike1 # Sets zoom + threads sticky +pq bookmark load filter no-allocators # Applies filters sticky + +# Management +pq bookmark list # Show all bookmarks for current profile +pq bookmark info spike1 # Show bookmark details +pq bookmark delete spike1 # Remove bookmark +pq bookmark export spike1 > spike1.json # Export for sharing (future) +pq bookmark import spike1.json # Import bookmark (future) +``` + +### 7. Status Command + +Always shows current state across all dimensions: + +```bash +pq status + +# Output: +Session: 3ugy6phmzqc +Profile: https://share.firefox.dev/4oLEjCw +Thread: t-93 (Renderer) +Zoom: ts-6,ts-7 (2.701s - 3.091s) + └─ parent: full profile (0s - 30.5s) + +Sample/Stack Filters for t-93 (applied in order): + 1. [stack transform] merge: malloc, free, arena_dalloc, je_malloc + 2. [sample filter] includes function: f-142 (dxgi.dll!CDXGISwapChain::PresentImpl) + 3. [sample filter] during markers matching: --search Paint + +Marker Filters for t-93: + 1. search: Paint + 2. 
min-duration: 5ms + +Filters for other threads: + t-0: 2 sample/stack filters, 1 marker filter + t-99: 1 sample/stack filter, 0 marker filters + +Bookmarks loaded: + - view: spike1 + - filter: no-allocators +``` + +--- + +## Filter Types + +### Sample Filters (Inclusion/Exclusion) + +Control which samples are included in analysis: + +```bash +# Include samples where... +# OR semantics: use -any suffix with comma-separated list +--includes-any-function f-142,f-143 # Stack contains f-142 OR f-143 +--includes-function f-142 # Stack contains this function (can repeat for AND) + +--includes-prefix f-1,f-2,f-3 # Stack starts with this sequence +--includes-suffix f-999 # Stack ends with this function + +--during-marker --search Paint # Timestamp falls within matching marker +--during-marker --category Graphics # Timestamp falls within marker in category + +# Exclude samples where... +--excludes-any-function malloc,free # Stack does not contain malloc OR free +--excludes-function malloc # Stack does not contain this function +--outside-marker --search GC # Timestamp outside matching markers + +# Sample filters affect: +# - thread samples (which samples aggregate) +# - thread markers (which samples contribute to marker statistics) +# - thread functions (which samples count toward function time) + +# Combining filters: +# - Same flag repeated = AND (--includes-function f-1 --includes-function f-2 = has both) +# - -any suffix = OR (--includes-any-function f-1,f-2 = has either) +``` + +### Stack Transform Filters + +Modify stack traces in all samples: + +```bash +# Merge (remove functions from stacks, collapse callers to callees) +--merge malloc,free # Remove these functions +--merge-regex "^(malloc|free)" # Remove matching functions +# A -> malloc -> B becomes A -> B + +# Root-at (show only subtree rooted at function) +--root-at f-142 # Show only time within this function +# Only shows stacks that include f-142, with f-142 as root + +# Strip prefix (remove leading frames) +--strip-prefix f-1,f-2,f-3 # Remove these from stack tops +# f-1 -> f-2 -> f-3 -> f-4 becomes f-4 + +# Strip suffix (remove trailing frames) +--strip-suffix f-999 # Remove these from stack bottoms +# f-1 -> f-2 -> f-999 becomes f-1 -> f-2 + +# Stack transforms affect: +# - thread samples (how stacks are displayed) +# - thread functions (which functions appear in list) +# - Call tree structure +``` + +### Thread and Zoom (Spatial/Temporal) + +Not technically "filters" but part of the view context: + +```bash +# Thread selection +--thread t-93 # Single thread (ephemeral) +--threads t-0,t-93 # Multiple threads (ephemeral) +pq thread select t-93 # Single thread (sticky) + +# Zoom (time range) +--zoom ts-6,ts-7 # Specific range (ephemeral) +--zoom m-158 # Marker's time range (ephemeral) +pq zoom push ts-6,ts-7 # Push range (sticky) +pq zoom push m-158 # Push marker's range (sticky, ✅ implemented) +``` + +--- + +## Bookmark Types + +### View Bookmarks + +Capture spatial/temporal context (where/when you're looking): + +```bash +pq bookmark view spike1 --zoom ts-6,ts-7 --threads t-0,t-93 +pq bookmark view gpu-idle --thread t-93 --zoom ts-p,ts-q + +# View bookmarks include: +# - Time range (zoom level) +# - Thread selection (single or multiple) +``` + +### Filter Bookmarks + +Capture analytical transforms (how you're analyzing): + +```bash +pq bookmark filter no-allocators \ + --merge malloc,free,arena_dalloc,je_malloc \ + --merge-regex "^moz_x" + +pq bookmark filter paint-only \ + --during-marker --search Paint \ + --root-at f-50 + +# 
Filter bookmarks include: +# - Sample filters (includes/excludes) +# - Stack transforms (merge/root/strip) +``` + +### Using Bookmarks + +```bash +# Ephemeral use (doesn't change state) +pq thread samples --view spike1 +pq thread samples --filter no-allocators +pq thread samples --view spike1 --filter no-allocators + +# Sticky load (changes state) +pq bookmark load view spike1 # Sets thread + zoom +pq bookmark load filter no-allocators # Applies filters +pq status # Shows loaded bookmarks +pq thread samples # Uses loaded context + +# Unload +pq bookmark unload view # Clear view bookmark +pq bookmark unload filter # Clear filter bookmark +pq bookmark unload all # Clear all bookmarks +``` + +--- + +## Example Scenarios + +### Scenario 1: Quick CPU Spike Investigation + +```bash +# See profile overview +$ pq profile info +# Output shows: "160% CPU for 390ms [ts-6,ts-7]" + +# Quick peek at that spike +$ pq thread samples --zoom ts-6,ts-7 --limit 20 +# Looks interesting, main thread doing Paint + +# Check GPU thread during same time +$ pq thread samples --thread t-93 --zoom ts-6,ts-7 --limit 20 +# GPU doing Present + +# Make spike sticky to investigate further +$ pq zoom push ts-6,ts-7 +$ pq thread select t-0 +$ pq thread samples + +# Check markers from both threads +$ pq thread markers --threads t-0,t-93 +# See WM_PAINT on t-0, Composite on t-93 at same time + +# Done with spike +$ pq zoom pop +``` + +### Scenario 2: Eliminating Allocator Noise + +```bash +# Call tree is noisy +$ pq thread samples +# Lots of malloc/free/arena_dalloc + +# Try merging ephemerally +$ pq thread samples --merge malloc,free,arena_dalloc --limit 30 +# Better! See actual work + +# Make it sticky +$ pq filter push --merge malloc,free,arena_dalloc,je_malloc +$ pq filter push --merge-regex "^moz_x" +$ pq thread samples +# Clean call tree across all commands + +# Save for future use +$ pq bookmark filter no-allocators --merge malloc,free,arena_dalloc,je_malloc --merge-regex "^moz_x" + +# Later session (on same profile) +$ pq bookmark load filter no-allocators +``` + +### Scenario 3: Analyzing Time in Specific Function + +```bash +# Find expensive function +$ pq thread functions --search PresentImpl +# f-142. 
dxgi.dll!CDXGISwapChain::PresentImpl - 16.4% total + +# See what it's calling (ephemeral root) +$ pq thread samples --root-at f-142 --limit 30 +# Shows subtree rooted at PresentImpl + +# Also filter to only samples that include it +$ pq filter push --includes-function f-142 +$ pq filter push --root-at f-142 +$ pq thread samples +# Now analyzing only time within PresentImpl + +# Check markers during this work +$ pq thread markers +# Sample filters affect marker aggregation too + +# Clear when done +$ pq filter clear +``` + +### Scenario 4: Cross-Thread Causality Chain + +```bash +# Main thread fires Paint +$ pq thread select t-0 +$ pq marker info m-158 +# WindowProc WM_PAINT at 1h2m (33.52ms) + +# Zoom to that marker +$ pq zoom push m-158 + +# See what GPU did during same time +$ pq thread samples --thread t-93 --limit 20 +# GPU doing Present work + +# Make GPU thread sticky +$ pq thread select t-93 + +# Only look at samples during Paint markers +$ pq filter push --during-marker --search Paint +$ pq thread samples +# Now only see GPU work that happened during Paint + +# Save this analysis (bookmarks are per-profile) +$ pq bookmark view paint-causality --zoom m-158 --thread t-93 +$ pq bookmark filter paint-only --during-marker --search Paint + +# Later: reload entire analysis (same profile) +$ pq bookmark load view paint-causality +$ pq bookmark load filter paint-only +$ pq thread samples +``` + +--- + +## Implementation Notes + +### Per-Thread Filter Scoping + +**Why per-thread?** Function handles (e.g., `f-142`) are thread-specific. A function handle on thread t-0 has no meaning on thread t-93. + +```bash +pq thread select t-0 +pq thread functions --search Present +# f-142. dxgi.dll!CDXGISwapChain::PresentImpl on t-0 + +pq filter push --includes-function f-142 # Filter for t-0 + +pq thread select t-93 +# f-142 on t-93 is a DIFFERENT function! +# t-93 has its own independent filter stack (empty initially) +``` + +**Behavior when switching threads:** + +- Each thread maintains its own sample/stack filter stack +- Each thread maintains its own marker filter stack +- Zoom is global (applies across all threads) +- Thread selection is global (which thread is currently active) + +**Status summary:** + +- `pq status` shows filters for current thread in detail +- Lists other threads with filter count summaries + +### Filter Application Order + +Sample/stack filters are applied in **push order** (the order you pushed them onto the stack). + +This is critical because filters can depend on each other: + +```bash +# Example: Strip prefix, then filter by new prefix +pq filter push --strip-prefix A,B,C # 1. Transform: remove A→B→C +pq filter push --includes-prefix D # 2. Filter: include only stacks starting with D +# Filter 2 sees stacks AFTER filter 1's transformation + +# Example: Filter samples, then merge stacks +pq filter push --includes-function PresentImpl # 1. Only samples with PresentImpl +pq filter push --merge malloc,free # 2. Merge allocators in those samples +``` + +**General guidance for typical use:** + +1. Stack transforms first (--strip-prefix, --merge, --root-at) +2. Sample filters second (--includes, --excludes, --during-marker) + +But the single ordered stack gives you flexibility when needed. + +### Marker Filters vs Sample Filters Using Markers + +**Two different concepts:** + +1. **Marker filters** - affect marker display only + + ```bash + pq marker filter push --search Paint + pq thread markers # Shows only Paint markers + ``` + +2. 
**Sample filters using markers** - filter samples based on marker timing + ```bash + pq filter push --during-marker --search Paint + pq thread samples # Shows only samples during Paint markers + ``` + +**Important:** Sample filters that reference markers (e.g., `--during-marker`) use the **unfiltered** marker set, not the display-filtered set. Marker filters only affect `pq thread markers` output. + +### Zoom Stack Behavior + +Zoom works like a stack, and nested zooms must be contained within parent ranges: + +```bash +pq zoom push ts-6,ts-7 # Stack: [full profile, ts-6 to ts-7] +pq zoom push ts-6a,ts-6c # Stack: [full profile, ts-6 to ts-7, ts-6a to ts-6c] + # Validates ts-6a,ts-6c is within ts-6,ts-7 +pq zoom pop # Stack: [full profile, ts-6 to ts-7] +pq zoom pop # Stack: [full profile] +pq zoom pop # No-op, already at root + +# Invalid: non-nested range +pq zoom push ts-6,ts-7 +pq zoom push ts-A,ts-B # Error: ts-A,ts-B not within ts-6,ts-7 +``` + +### Filter Stack Behavior + +Filters form a **single ordered stack** regardless of filter type: + +```bash +pq filter push --includes-function f-142 # Stack: [f-142] +pq filter push --during-marker --search Paint # Stack: [f-142, Paint] +pq filter push --merge malloc # Stack: [f-142, Paint, malloc] + +pq filter list +# Shows: 3 filters in order +# 1. [sample filter] includes function: f-142 +# 2. [sample filter] during markers: --search Paint +# 3. [stack transform] merge: malloc + +pq filter pop # Remove most recent filter (malloc) + # Stack: [f-142, Paint] + +pq filter pop 2 # Remove 2 most recent filters + # Stack: [] + +pq filter clear # Remove ALL filters + # Stack: [] + +# Save complex filter stacks via bookmarks +pq bookmark filter my-analysis --includes f-142 --during-marker --search Paint --merge malloc +``` + +### Bookmark Storage + +Bookmarks are **per-profile** and session-local (stored in daemon memory): + +- Scoped to the current profile URL/path +- Lost when session ends (`pq stop`) +- Not shared across different profiles or sessions +- Loading a different profile = different set of bookmarks +- Future: could persist to ~/.pq/bookmarks// for cross-session use +- Future: export/import for sharing with colleagues + +### Status Command + +`pq status` output structure: + +``` +Session: +Thread: () +Zoom: + [└─ parent: ] # If zoomed + [└─ parent: ] # Can be nested +[Filters (applied in order):] + [N. [type] ] + [...] +[Bookmarks loaded:] + [- view: ] + [- filter: ] +``` + +Filter types displayed: + +- `[sample filter]` - includes/excludes samples +- `[stack transform]` - modifies stacks +- `[marker filter]` - temporal filtering + +--- + +## Future Enhancements + +### Auto-generated Bookmarks + +System could create bookmarks automatically: + +```bash +pq profile hotspots +# Output: +# 1. [ts-6,ts-7] 160% CPU for 390ms +# bookmark: spike:1 +# 2. [ts-A,ts-b] 160% CPU for 450ms +# bookmark: spike:2 + +pq bookmark load view spike:1 +# Automatically created bookmarks +``` + +### Bookmark Export/Import + +```bash +pq bookmark export spike1 > spike1.json +pq bookmark import spike1.json + +# Share bookmarks with colleagues +``` + +### Bookmark Aliases + +```bash +pq bookmark alias s1 spike1 +pq thread samples --view s1 +``` + +### Smart Merge Sets + +```bash +pq bookmark filter no-alloc --merge @allocators +# @allocators = predefined set: malloc, free, arena_dalloc, etc. + +pq bookmark filter no-overhead --merge @profiler-overhead +# @profiler-overhead = profiler stack frames +``` + +--- + +## Open Questions + +1. 
✅ **Multiple threads in sticky state?** - RESOLVED + - `thread select t-0,t-93` supports multi-thread selection + +2. ✅ **Filter combination logic?** - RESOLVED + - Use `-any` suffix for OR: `--includes-any-function f-1,f-2` + - Repeated flags for AND: `--includes-function f-1 --includes-function f-2` + +3. ✅ **Zoom validation?** - RESOLVED + - Nested zooms must be contained within parent ranges + - Error if attempting to zoom to non-nested range + +4. ✅ **Bookmark namespaces?** - RESOLVED + - Bookmarks are per-profile (scoped to current profile) + +5. ✅ **Prefix semantics?** - RESOLVED + - `--includes-prefix f-1,f-2,f-3` means exactly f-1→f-2→f-3 sequence + +6. ✅ **Filter application order?** - RESOLVED + - Single ordered stack, applied in push order + - Order matters (can strip prefix then filter by new prefix) + +--- + +## Summary + +This proposal creates a consistent, composable system for managing analysis state in pq: + +- **Ephemeral filters** via flags work everywhere +- **Sticky state** via explicit push/pop/clear commands +- **Consistent terminology**: push/pop/clear for zoom and filters, load/unload for bookmarks +- **Stack-based**: zoom and filters can be layered and unwound +- **Single filter stack**: one ordered stack for all filters (order matters!) +- **Filter dependencies**: can transform stacks then filter samples +- **OR vs AND semantics**: `-any` suffix for OR, repeated flags for AND +- **Per-profile bookmarks**: scoped to current profile +- **Nested zoom validation**: ranges must be properly contained +- **Status** always shows current context including filter order +- **Composable** - filters, zoom, threads are independent dimensions + +The system is designed to be discoverable (flags are ephemeral by default) while supporting power-user workflows (sticky state and bookmarks). diff --git a/pq-todo.md b/pq-todo.md new file mode 100644 index 0000000000..2d4dda6b6a --- /dev/null +++ b/pq-todo.md @@ -0,0 +1,488 @@ +# pq To-Do List + +Feature wishlist and improvement ideas for the pq profile query tool. + +--- + +## Active Proposals + +**[Filters and Bookmarks System](pq-filters-and-bookmarks-proposal.md)** - Comprehensive design for ephemeral vs sticky state, filter system, and bookmarks. + +--- + +## Critical Priority (Blocking Effective Use) + +### 1. Persistent Context Display ✅ + +Every command output displays current context in a compact header: + +``` +[Thread: t-0 (GeckoMain) | View: Full profile | Full: 30.42s] +[Thread: t-0 (GeckoMain) | View: ts-Fo→ts-Fu (851.1ms) | Full: 30.42s] +[Thread: t-0,t-93 (GeckoMain, Renderer) | View: Full profile | Full: 30.42s] +``` + +### 2. Function Search/Filter ✅ + +Commands available: + +```bash +pq thread functions # List all functions with CPU% +pq thread functions --search Present # Substring search +pq thread functions --min-self 1 # Filter by self time percentage +pq thread functions --limit 50 # Limit results +``` + +### 3. 
Smart Range Navigation ⚠️ + +**Status:** Partially implemented - marker handles work, CPU spike navigation doesn't yet + +**Implemented:** + +```bash +pq zoom push m-158 # Zoom to marker's time range +``` + +**Still needed:** + +```bash +pq profile hotspots # List all high-CPU periods +pq zoom push --spike 1 # Jump to first spike +pq zoom push --spike next # Next spike from current position +pq profile hotspots --min-cpu 150% # Find sustained >150% periods +``` + +**Enhancement:** Named bookmarks: + +```bash +pq zoom push ts-Fo,ts-Fu --name "resize-thrash" +pq zoom list +pq zoom push resize-thrash +``` + +### 4. Cross-Thread Marker View ✅ + +Commands available: + +```bash +pq thread markers --thread t-0,t-93 # Merged view of specific threads +pq thread functions --thread t-0,t-93 # Functions from multiple threads +pq thread samples --thread t-0,t-93 # Samples from multiple threads +pq thread select t-0,t-93 # Select threads (sticky) +``` + +**Not yet implemented:** + +```bash +pq marker related m-158 # Show markers on other threads at same time +``` + +--- + +## High Priority (Significant Value) + +### 5. Relative Handle References ❌ + +**Problem:** Must scroll back to find handles like "m-168" when investigating + +**Proposed:** + +```bash +pq marker info m-@1 # First marker in last listing +pq marker info m-@last # Last marker in last listing +pq marker info m-@longest # Longest duration in last listing +pq marker info m-@prev # Previously inspected marker + +pq function expand f-@1 # First function in last listing +pq function expand f-@highest # Highest self-time +``` + +**Alternative:** Show rank in listings: + +``` +Markers in thread t-0 — 50 markers + #1 → m-147 (28.89ms) Runnable + #2 → m-148 (15.23ms) Runnable +``` + +Then allow: `pq marker info #1` or `pq marker info @1` + +### 6. Stack Availability Indicators ✅ + +Visual indicators (✓/✗) next to marker handles with legend: + +``` +Markers in thread t-0 (Parent Process) — 50 markers (filtered from 258060) +Legend: ✓ = has stack trace, ✗ = no stack trace + +By Name (top 15): + SimpleTaskQueue::AddTask 7 markers (instant) + Examples: m-25 ✓, m-26 ✓, m-27 ✓ + Runnable 15 markers (interval: min=1µs, avg=285µs) + Examples: m-20 ✗ (3.95ms), m-21 ✗ (61µs) +``` + +### 7. Bottom-Up Call Tree ✅ + +Command available: + +```bash +pq thread samples-bottom-up # Bottom-up call tree +``` + +Shows inverted call tree starting from leaf functions, directly answering "what code paths lead to this bottleneck?" + +### 8. Sample Output Filtering ⚠️ + +**Status:** Partially implemented (--limit exists for markers but not samples) + +**Flags needed:** + +```bash +pq thread samples --limit 30 # Top 30 functions only +pq thread samples --min-self 1% # Hide functions <1% self time +pq thread samples --max-depth 15 # Limit tree depth +pq thread samples --top-only # Skip call trees +pq thread samples --tree-only # Skip top functions list +``` + +**Enhancement:** Show truncation stats when call tree is cut off: + +``` +Regular Call Tree (showing top 30 functions, 249 lines omitted): + (root) [total: 100.0%] + └─ ... + +[249 lines omitted: 142 unique functions, max self time 0.3%, cumulative 2.1%] +``` + +### 9. Dual Percentages When Zoomed ✅ + +When zoomed, shows both view and full profile percentages: + +```bash +# When zoomed: +Functions (by self time): + f-1. win32u.dll!ZwUserMsgWaitForMultipleObjectsEx - self: 2024 (39.8% of view, 12.3% of full) + +# When not zoomed: +Functions (by self time): + f-1. 
win32u.dll!ZwUserMsgWaitForMultipleObjectsEx - self: 6916 (42.2%), total: 6916 (42.2%) +``` + +### 10. Inline Thread Selection ⚠️ + +**Proposed:** + +```bash +pq thread samples --thread t-93 # Query without selecting +pq thread info --thread t-0 # Peek at thread +pq thread markers --thread t-93 --search Paint +``` + +### 11. Function Info Shows Full Name ✅ + +The `pq function info` command displays both full and short names: + +```bash +Function f-1: + Thread: t-0 (GeckoMain) + Full name: win32u.dll!ZwUserMsgWaitForMultipleObjectsEx + Short name: ZwUserMsgWaitForMultipleObjectsEx + Is JS: false + ... +``` + +### 12. Module-Level Grouping ❌ + +**Proposed:** + +``` +Top Functions (by self time): + 63.5% ntdll.dll!ZwWaitForAlertByThreadId + 8.6% ntdll.dll!NtWaitForSingleObject + ... + +Module Summary: + 14.3% atidxx64.dll (AMD GPU Driver - 23 functions) + 12.8% xul.dll (Firefox Core - 156 functions) + 9.2% ntdll.dll (Windows NT - 12 functions) +``` + +### 13. Profile Summary Command ❌ + +**Proposed:** + +```bash +pq summary # Overall profile summary +pq thread summary # Current thread(s) summary +``` + +Example output: + +``` +Profile Summary [ts-Fo → ts-Fu] (851ms, 100% CPU) + +Top Threads: + t-93 (Renderer): 36.5% active, 63.5% waiting + t-0 (GeckoMain): 42.2% waiting, 34.3% active + +Hot Functions: + 16.3% dxgi.dll!CDXGISwapChain::PresentImpl + 7.7% WaitForFrameGPUQuery + 4.1% atidxx64.dll (AMD driver) + +Activity: + 91,300 Layout operations (30K style flushes) + 17 Composite frames (avg: 12.7ms) + 3 CSS transitions (200ms each) +``` + +--- + +## Medium Priority (Nice to Have) + +### 14. CPU Activity Timestamps Inline ✅ + +Timestamp names show actual times inline: + +```bash +pq profile info + +CPU activity over time: +- 100% for 390.6ms: [ts-6 → ts-7] (2.701s - 3.092s) +- 100% for 255.3ms: [ts-8 → ts-9] (3.102s - 3.357s) +- 100% for 851.1ms: [ts-Fo → ts-Fu] (9.453s - 10.305s) +``` + +### 15. Enhanced Zoom Output ✅ + +Zoom output shows duration, depth, and marker context: + +```bash +pq zoom push ts-i,ts-M +Pushed view range: ts-i → ts-M (6.991s - 10.558s, duration 3.567s) + ts-i: Start of CPU spike #3 (100% CPU sustained) + ts-M: End of marker m-143 (Composite frame) + Zoom depth: 2/5 (use "pq zoom pop" to go back) +``` + +### 16. Range Comparison ❌ + +**Proposed:** + +```bash +pq compare ts-6,ts-7 vs ts-8,ts-9 # Compare two ranges + +# Example output: +Comparison: [ts-6,ts-7] (391ms) vs [ts-8,ts-9] (255ms) + +CPU Activity: + Range 1: 100% CPU (390.6ms) Range 2: 100% CPU (255.3ms) + +Top Functions: + Range 1 Range 2 + 42.2% ZwWaitForAlertByThreadId 45.1% ZwWaitForAlertByThreadId (+2.9%) + 16.3% CDXGISwapChain::Present 18.7% CDXGISwapChain::Present (+2.4%) +``` + +### 17. Wait/Idle Analysis ❌ + +**Proposed:** + +```bash +pq thread waits # Show all wait operations +pq thread waits --min-duration 10ms # Significant waits only +pq thread waits --summary # Aggregate stats +``` + +### 18. Frame-Level Analysis ❌ + +**Proposed:** + +```bash +pq thread frames # List paint/composite frames +pq thread frames --slow # Frames >16ms (jank) +pq frame info 72 # Details about frame #72 +``` + +### 19. Stack-Level Inspection ❌ + +**Proposed:** + +```bash +pq thread stacks # Show heaviest individual stacks +pq thread stacks --limit 5 # Top 5 heaviest +``` + +### 20. 
Split Sample Command ⚠️ + +**Proposed:** + +```bash +pq thread samples-top [--limit N] # Just top functions +pq thread samples-tree [--max-depth N] # Just call tree +pq thread samples # All views (backward compat) +``` + +Note: `samples-bottom-up` is already a separate command (item #7). + +### 21. Cross-Thread Context ❌ + +**Proposed:** + +```bash +pq profile info --in-zoom # Show top threads/CPU in current zoom range +pq marker related m-158 # Show markers on other threads at same time +``` + +Example: + +``` +$ pq marker related m-168 +Marker m-168: Reflow (interruptible) at 3717465.724ms + +Related markers (±10ms window): + t-93 (Renderer): + m-5432 [3717465.2ms] Composite #72 (started 0.5ms before) + m-5433 [3717467.1ms] Texture uploads (during reflow) +``` + +### 22. Filter Provenance ❌ + +**Problem:** Output shows "50 markers (filtered from 258060)" but doesn't explain how filtering reduced the count + +Was it zoom? Search filter? Duration filter? Limit? Users can't tell what contributed to the reduction. + +**Proposed:** Show filter provenance chain: + +``` +50 markers shown (zoom: 258060 → 91300, filters: 91300 → 1200, limit: 1200 → 50) +``` + +Or more compact: + +``` +50 markers (from 258060: zoom→91300, filters→1200, limit→50) +``` + +This helps users understand whether they're missing important data due to filters or just seeing everything that matches their criteria. + +--- + +## Low Priority (Polish) + +### 23. Frequency Analysis Terminology ✅ + +Marker output uses clear terminology: + +``` +Frequency Analysis: + Image Paint: 29081.3 markers/sec (interval: min=4µs, avg=36µs, max=468µs) +``` + +The first number is frequency (markers/sec), and the min/avg/max values are intervals (time gaps between markers). + +### 24. Export/Save ❌ + +**Proposed:** + +```bash +pq thread samples --output report.txt # Save to file +pq thread markers --json > markers.json # JSON already works +pq session export investigation.pqsession # Save entire session state +``` + +### 25. Color Output ❌ + +**Proposed:** Color-code percentages, durations, and handles for easier scanning + +### 26. Progress Indicators ❌ + +**Proposed:** Show progress for operations >1s: + +``` +$ pq load large-profile.json.gz +Loading profile... 45% (123MB/273MB) +``` + +### 27. Sparklines/Histograms ❌ + +**Proposed:** ASCII sparklines for temporal distribution: + +``` +Markers in thread t-0: + Reflow: 534 markers ▁▂▃▅▇█▇▅▃▂▁ (peak: ts-Fo → ts-Fu) + Paint: 127 markers ▃▃▂▂▅▅▇█▃▂▁ (peak: ts-r → ts-s) +``` + +### 28. Smart Function Name Display ❌ + +**Enhancements:** + +- Allow `--name-width N` to control truncation +- Show ellipsis `...` when truncated +- For very long names, show start + end + +### 29. Auto-Suggest Next Steps ❌ + +**Proposed:** After commands, suggest related actions: + +``` +$ pq marker info m-168 +Marker m-168: Reflow (interruptible) - 907µs +... 
+ +💡 Next steps: + pq marker stack m-168 # View call stack + pq zoom push m-168 # Zoom to this marker's time range + pq thread markers --search Reflow --min-duration 500 # Find similar markers +``` + +--- + +## Summary by Priority + +**Critical:** 1 item remaining (3 completed) + +- Smart range navigation - partially implemented (markers work, CPU spikes don't) + +**High:** 4 items remaining (5 completed) + +- Relative handle references +- Sample output filtering - partially implemented +- Inline thread selection +- Module-level grouping +- Profile summary command + +**Medium:** 7 items remaining (2 completed) + +- Range comparison +- Wait/idle analysis +- Frame-level analysis +- Stack-level inspection +- Split sample command - partially implemented +- Cross-thread context +- Filter provenance + +**Low:** 6 items remaining (1 completed) + +- Export/save +- Color output +- Progress indicators +- Sparklines/histograms +- Smart function name display +- Auto-suggest next steps + +--- + +## Core Features Already Implemented + +- **Marker support** with rich filtering (`--search`, `--category`, `--min-duration`, `--max-duration`, `--has-stack`, `--limit`, `--group-by`, `--auto-group`) +- **Function handles** (`function expand`, `function info`) +- **Smart function name truncation** (120 char limit, tree-based parsing) +- **Zoom range management** (`zoom push`, `zoom pop`, `zoom clear`, `status`) +- **Library/module names** in function display +- **Timestamp names + readable times** +- **Deep call trees** (Regular and Bottom-up/Inverted) +- **Persistent context display** in all command outputs diff --git a/src/profile-logic/call-tree.ts b/src/profile-logic/call-tree.ts index 5802354430..a6ce45a6ee 100644 --- a/src/profile-logic/call-tree.ts +++ b/src/profile-logic/call-tree.ts @@ -389,6 +389,10 @@ export class CallTree { this._weightType = weightType; } + getTotal(): number { + return this._rootTotalSummary; + } + getRoots() { return this._roots; } diff --git a/src/profile-logic/combined-cpu.ts b/src/profile-logic/combined-cpu.ts index 0ba89d513f..54a2af6438 100644 --- a/src/profile-logic/combined-cpu.ts +++ b/src/profile-logic/combined-cpu.ts @@ -3,6 +3,7 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ import type { SamplesTable } from 'firefox-profiler/types'; +import { bisectionLeft } from '../utils/bisect'; /** * Represents CPU usage over time for a single thread. @@ -31,11 +32,15 @@ export type CpuRatioTimeSeries = { * extend a thread's CPU usage beyond its last sample time. * * @param threadSamples - Array of SamplesTable objects, one per thread + * @param rangeStart - Optional start time to filter samples (inclusive) + * @param rangeEnd - Optional end time to filter samples (exclusive) * @returns Combined CPU data with unified time array and summed CPU ratios, * or null if no threads have CPU data */ export function combineCPUDataFromThreads( - threadSamples: SamplesTable[] + threadSamples: SamplesTable[], + rangeStart?: number, + rangeEnd?: number ): CpuRatioTimeSeries | null { // Filter threads that have CPU ratio data. 
// We require at least two samples per thread; the first sample's CPU ratio @@ -44,11 +49,30 @@ export function combineCPUDataFromThreads( const threadsWithCPU: CpuRatioTimeSeries[] = []; for (const samples of threadSamples) { if (samples.threadCPURatio && samples.time.length >= 2) { + let time = samples.time; + let cpuRatio = samples.threadCPURatio; + let length = samples.length; + + // If a range is specified, slice the data to that range + if (rangeStart !== undefined && rangeEnd !== undefined) { + const startIndex = bisectionLeft(samples.time, rangeStart); + const endIndex = bisectionLeft(samples.time, rangeEnd, startIndex); + + if (startIndex < endIndex) { + time = samples.time.slice(startIndex, endIndex); + cpuRatio = samples.threadCPURatio.slice(startIndex, endIndex); + length = endIndex - startIndex; + } else { + // No samples in this range for this thread + continue; + } + } + threadsWithCPU.push({ - time: samples.time, - cpuRatio: samples.threadCPURatio, + time, + cpuRatio, maxCpuRatio: Infinity, - length: samples.length, + length, }); } } diff --git a/src/profile-query-cli/.npmignore b/src/profile-query-cli/.npmignore new file mode 100644 index 0000000000..0c37eee2a6 --- /dev/null +++ b/src/profile-query-cli/.npmignore @@ -0,0 +1,32 @@ +# Source files - only publish the built dist/pq.js +*.ts +webpack.config.js + +# Build artifacts (except dist/pq.js which is in "files") +dist/* +!dist/pq.js + +# Development files +node_modules/ +*.log +*.tmp +.DS_Store + +# Git files +.git/ +.gitignore + +# Editor configs +.vscode/ +.idea/ +*.swp +*.swo + +# Test files +*.test.ts +*.test.js +__tests__/ +tests/ + +# CI/CD +.github/ diff --git a/src/profile-query-cli/README.md b/src/profile-query-cli/README.md new file mode 100644 index 0000000000..d7f3b5cd7d --- /dev/null +++ b/src/profile-query-cli/README.md @@ -0,0 +1,300 @@ +# Profile Query CLI (`pq`) + +A command-line interface for querying Firefox Profiler profiles with persistent daemon sessions. + +## Architecture + +**Two-process model:** + +- **Daemon process**: Long-running background process that loads a profile via `ProfileQuerier` and keeps it in memory +- **Client process**: Short-lived process that sends commands to the daemon and prints results + +**IPC:** Unix domain sockets with line-delimited JSON messages + +**Session storage:** `~/.pq/` (or `$PQ_SESSION_DIR` for development) + +## Commands + +**Note:** On machines with `pq` in PATH, you can use `pq` instead of `./dist/pq.js` for shorter commands. 
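+
+Before the full reference, here is a minimal end-to-end session (illustrative; the profile filename is a placeholder):
+
+```bash
+pq load profile.json        # Start a daemon and load the profile
+pq profile info             # Print the profile summary
+pq thread select t-0        # Select a thread
+pq thread samples           # Show the call tree and top functions
+pq stop                     # Stop the daemon
+```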
+ +```bash +# Build the CLI +yarn build-profile-query-cli + +# Basic usage +pq load # Start daemon and load profile +pq profile info # Print profile summary +pq thread info # Print detailed thread information +pq thread select # Select a thread (e.g., t-0, t-1) +pq thread samples # Show call tree and top functions +pq function expand # Show full untruncated function name (e.g., f-1) +pq function info # Show detailed function information +pq zoom push # Push a zoom range (e.g., 2.7,3.1 or ts-g,ts-G or m-158) +pq zoom pop # Pop the most recent zoom range +pq status # Show session status (selected thread, zoom ranges) +pq stop # Stop daemon +pq list-sessions # List all running daemon sessions + +# Multiple sessions +pq load --session +pq profile info --session +pq stop --session + +# Thread selection +pq thread select t-93 # Select thread t-93 +pq thread samples # View samples for selected thread +pq thread info --thread t-0 # View info for specific thread without selecting +``` + +## Project Structure + +``` +src/profile-query-cli/ +├── index.ts # CLI entry point, argument parsing, command routing +├── client.ts # Client logic: spawn daemon, send commands via socket +├── daemon.ts # Daemon logic: load profile, listen on socket, handle commands +├── session.ts # Session file management, socket paths, validation +├── protocol.ts # TypeScript types for IPC messages +├── webpack.config.js # Build config with shebang and Node.js polyfills +├── package.json # npm distribution metadata (dependencies defined in root) +└── tests/ # CLI integration tests +``` + +## Build & Distribution + +This package uses a **bundled distribution approach**: + +- **Source code**: Lives in `src/profile-query-cli/` within the firefox-devtools/profiler monorepo +- **Dependencies**: Defined in the root `package.json` (react, redux, protobufjs, etc.) +- **Build process**: Webpack bundles and minifies everything into a single ~640KB `dist/pq.js` file (~187KB gzipped) with zero runtime dependencies +- **Published artifact**: Just the `dist/pq.js` executable is published to npm as `@firefox-profiler/pq` +- **Package.json**: Contains only npm metadata - it does NOT list dependencies since they're pre-bundled + +This means: +- Users who install via npm get a self-contained binary that just works +- Developers working on the CLI use the root package.json dependencies +- The `package.json` in this directory is for npm publishing only, not for development + +To publish: +```bash +# From repository root +yarn build-profile-query-cli +cd src/profile-query-cli +npm publish +``` + +## Session Management + +**Session directory:** `~/.pq/` or `$PQ_SESSION_DIR` + +**Files per session:** + +``` +~/.pq/ +├── current # Symlink to current session socket +├── .sock # Unix domain socket for IPC +├── .json # Session metadata (PID, profile path, timestamps) +└── .log # Daemon logs (kept for debugging) +``` + +**Session metadata example:** + +```json +{ + "id": "abc123xyz", + "socketPath": "/Users/user/.pq/abc123xyz.sock", + "logPath": "/Users/user/.pq/abc123xyz.log", + "pid": 12345, + "profilePath": "/path/to/profile.json", + "createdAt": "2025-10-31T10:00:00.000Z" +} +``` + +## Development Workflow + +**Environment variable isolation:** + +```bash +export PQ_SESSION_DIR="./.pq-dev" # Use local directory instead of ~/.pq +pq load profile.json # or: ./dist/pq.js load profile.json +``` + +All test scripts automatically set `PQ_SESSION_DIR="./.pq-dev"` to avoid polluting global state. 
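+
+Every subsequent client command must see the same `PQ_SESSION_DIR`, because the client uses it to locate the daemon's socket. For example (a sketch; exporting the variable once, as above, has the same effect):
+
+```bash
+PQ_SESSION_DIR="./.pq-dev" ./dist/pq.js status   # Show selected thread and zoom ranges
+PQ_SESSION_DIR="./.pq-dev" ./dist/pq.js stop     # Stop the daemon for this dev session
+```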
+ +**Build:** + +```bash +yarn build-profile-query-cli # Creates ./dist/pq.js, global `pq` forwards to this +``` + +**Unit tests:** + +```bash +yarn test profile-query +``` + +**CLI integration tests:** + +```bash +yarn test:cli +``` + +## Implementation Details + +**Daemon startup (client.ts):** + +1. Spawn detached Node.js process with `--daemon` flag +2. Poll every 50ms for session readiness (max 10 seconds) +3. Check: session ID exists → metadata exists → session validates +4. Return immediately when ready + +**IPC protocol (protocol.ts):** + +```typescript +// Client → Daemon +type ClientMessage = + | { type: 'command'; command: ClientCommand } + | { type: 'shutdown' } + | { type: 'status' }; + +type ClientCommand = + | { command: 'profile'; subcommand: 'info' | 'threads' } + | { + command: 'thread'; + subcommand: 'info' | 'select' | 'samples'; + thread?: string; + } + | { command: 'zoom'; subcommand: 'push' | 'pop'; range?: string } + | { command: 'status' }; +// ... and more + +// Daemon → Client +type ServerResponse = + | { type: 'success'; result: string } + | { type: 'error'; error: string } + | { type: 'loading' } + | { type: 'ready' }; +``` + +**Session validation (session.ts):** + +- Check PID is running (`process.kill(pid, 0)`) +- Check socket file exists +- Auto-cleanup stale sessions + +**Symlinks:** + +- `current` symlink uses relative path (`sessionId.sock`) +- Resolved to absolute in `getCurrentSocketPath()` when needed + +## Current State + +**Implemented:** + +- ✅ Persistent daemon with profile loading +- ✅ Unix socket IPC +- ✅ Multiple concurrent sessions +- ✅ Session management (current session, explicit session IDs) +- ✅ Environment variable isolation (`PQ_SESSION_DIR`) +- ✅ Manual test scripts +- ✅ `profile info` command (shows profile name, platform, threads, CPU activity) +- ✅ `thread info` command (shows thread details, CPU activity over time) +- ✅ `thread select` command (selects a thread for subsequent queries) +- ✅ `thread samples` command (call tree, top functions, inverted tree, heaviest stack) +- ✅ `zoom push` and `zoom pop` commands (time range filtering with multiple format support, including marker handles) +- ✅ `status` command (shows selected thread and zoom range stack) +- ✅ `list-sessions` command (shows all running sessions) + +- ✅ `function expand` command (shows full untruncated function name) +- ✅ `function info` command (shows detailed function metadata) + +**Partially implemented (parsed and wired up but not functional):** + +- ⚠️ `profile threads` - throws "unimplemented" in daemon +- ⚠️ `thread markers` - throws "unimplemented" in daemon +- ⚠️ `thread functions` - throws "unimplemented" in daemon +- ⚠️ `marker info` and `marker select` - throws "unimplemented" in daemon +- ⚠️ `sample info` and `sample select` - throws "unimplemented" in daemon +- ⚠️ `function select` - throws "unimplemented" in daemon + +## Build Configuration + +**Key webpack settings:** + +- `target: 'node'` - Node.js output +- `stats: 'errors-warnings'` - Quiet builds +- `BannerPlugin` - Adds `#!/usr/bin/env node` shebang +- `BannerPlugin` - Adds `globalThis.self = globalThis` polyfill for browser globals +- `optimization.minimize: false` - Keep readable stack traces +- Postbuild: `chmod +x dist/pq.js` + +## Adding New Commands + +To add a new command, you need to modify **4 files** (client.ts doesn't need changes as it generically forwards commands): + +### Step 1: Define the command type in `protocol.ts` + +Add your command to the `ClientCommand` type union: + 
+```typescript +export type ClientCommand = + | { command: 'profile'; subcommand: 'info' | 'threads' } + | { command: 'marker'; subcommand: 'info'; marker?: string } // Add new command + | ... +``` + +### Step 2: Parse CLI arguments in `index.ts` + +Add a case to the command switch statement: + +```typescript +case 'marker': { + const subcommand = argv._[1] ?? 'info'; + const marker = argv.marker; // Read from --marker flag + if (subcommand === 'info' || subcommand === 'select') { + const result = await sendCommand( + SESSION_DIR, + { command: 'marker', subcommand, marker }, + argv.session + ); + console.log(result); + } else { + console.error(`Error: Unknown command ${command} ${subcommand}`); + process.exit(1); + } + break; +} +``` + +**Note:** client.ts doesn't need changes - it generically forwards all commands via `sendCommand()`. + +### Step 3: Handle the command in `daemon.ts` + +Add a case to `processCommand()`: + +```typescript +case 'marker': + switch (command.subcommand) { + case 'info': + return this.querier!.markerInfo(command.marker); + case 'select': + return this.querier!.markerSelect(command.marker); + default: + throw assertExhaustiveCheck(command); + } +``` + +### Step 4: Implement ProfileQuerier methods in `src/profile-query/index.ts` + +```typescript +async markerInfo(markerHandle?: string): Promise { + // Implementation + return formatMarkerInfo(this._store, this._threadMap, markerHandle); +} +``` + +### Step 5: Update documentation + +- Add the command to the help text in `index.ts` (the `printUsage()` function) +- Add the command to the "Commands" section of this README +- Add the command to the "Current State" section as implemented diff --git a/src/profile-query-cli/client.ts b/src/profile-query-cli/client.ts new file mode 100644 index 0000000000..c1a5614725 --- /dev/null +++ b/src/profile-query-cli/client.ts @@ -0,0 +1,287 @@ +/** + * Client for communicating with the pq daemon. + */ + +import * as net from 'net'; +import * as fs from 'fs'; +import * as path from 'path'; +import * as child_process from 'child_process'; +import type { + ClientCommand, + ClientMessage, + ServerResponse, + CommandResult, +} from './protocol'; +import { + cleanupSession, + generateSessionId, + getCurrentSessionId, + getCurrentSocketPath, + getSocketPath, + loadSessionMetadata, + validateSession, +} from './session'; +import { BUILD_HASH } from './constants'; + +/** + * Send a message to the daemon and return the raw response. + */ +async function sendRawMessage( + sessionDir: string, + message: ClientMessage, + sessionId?: string +): Promise { + const resolvedSessionId = sessionId || getCurrentSessionId(sessionDir); + + if (!resolvedSessionId) { + throw new Error('No active session. Run "pq load " first.'); + } + + // Validate the session + if (!validateSession(sessionDir, resolvedSessionId)) { + cleanupSession(sessionDir, resolvedSessionId); + throw new Error( + `Session ${resolvedSessionId} is not running or is invalid.` + ); + } + + // Check build hash matches + const metadata = loadSessionMetadata(sessionDir, resolvedSessionId); + if (metadata && metadata.buildHash !== BUILD_HASH) { + cleanupSession(sessionDir, resolvedSessionId); + throw new Error( + `Session ${resolvedSessionId} was built with a different version (daemon: ${metadata.buildHash}, client: ${BUILD_HASH}). The daemon has been stopped. Please run "pq load " again.` + ); + } + + const socketPath = sessionId + ? 
getSocketPath(sessionDir, sessionId) + : getCurrentSocketPath(sessionDir); + + if (!socketPath || !fs.existsSync(socketPath)) { + throw new Error(`Socket not found for session ${resolvedSessionId}`); + } + + return new Promise((resolve, reject) => { + const socket = net.connect(socketPath); + let buffer = ''; + + socket.on('connect', () => { + // Send the message + socket.write(JSON.stringify(message) + '\n'); + }); + + socket.on('data', (data) => { + buffer += data.toString(); + + // Look for complete response (newline-delimited JSON) + const newlineIndex = buffer.indexOf('\n'); + if (newlineIndex !== -1) { + const line = buffer.substring(0, newlineIndex); + try { + const response = JSON.parse(line) as ServerResponse; + socket.end(); + resolve(response); + } catch (error) { + reject(new Error(`Failed to parse response: ${error}`)); + } + } + }); + + socket.on('error', (error) => { + reject(new Error(`Socket error: ${error.message}`)); + }); + + socket.on('timeout', () => { + socket.destroy(); + reject(new Error('Connection timeout')); + }); + + socket.setTimeout(30000); // 30 second timeout + }); +} + +/** + * Send a message to the daemon and return the result. + * Only works for messages that return success responses. + * Result can be either a string (legacy) or a structured CommandResult. + */ +export async function sendMessage( + sessionDir: string, + message: ClientMessage, + sessionId?: string +): Promise { + const response = await sendRawMessage(sessionDir, message, sessionId); + + if (response.type === 'success') { + return response.result; + } else if (response.type === 'error') { + throw new Error(response.error); + } else { + throw new Error(`Unexpected response type: ${response.type}`); + } +} + +/** + * Send a status check to the daemon and return the response. + */ +async function sendStatusMessage( + sessionDir: string, + sessionId?: string +): Promise { + return sendRawMessage(sessionDir, { type: 'status' }, sessionId); +} + +/** + * Send a command to the daemon. + * Result can be either a string (legacy) or a structured CommandResult. + */ +export async function sendCommand( + sessionDir: string, + command: ClientCommand, + sessionId?: string +): Promise { + return sendMessage(sessionDir, { type: 'command', command }, sessionId); +} + +/** + * Start a new daemon for the given profile. + * Uses a two-phase approach: + * 1. Wait for daemon to be validated (short 500ms timeout) + * 2. Wait for profile to load via status checks (longer 60s timeout) + */ +export async function startNewDaemon( + sessionDir: string, + profilePath: string, + sessionId?: string +): Promise { + // Check if this is a URL + const isUrl = + profilePath.startsWith('http://') || profilePath.startsWith('https://'); + + // Resolve the absolute path (only for file paths, not URLs) + const absolutePath = isUrl ? 
profilePath : path.resolve(profilePath); + + // Check if file exists (skip this check for URLs) + if (!isUrl && !fs.existsSync(absolutePath)) { + throw new Error(`Profile file not found: ${absolutePath}`); + } + + // Generate a session ID upfront if not provided, so we know exactly which + // session to wait for (avoids race condition with existing sessions) + const targetSessionId = sessionId || generateSessionId(); + + // Get the path to the current script (pq.js) + const scriptPath = process.argv[1]; + + // Spawn the daemon process (detached from parent) + const child = child_process.spawn( + process.execPath, // node + [scriptPath, '--daemon', absolutePath, '--session', targetSessionId], + { + detached: true, + stdio: 'ignore', // Don't pipe stdin/stdout/stderr + env: { ...process.env, PQ_SESSION_DIR: sessionDir }, // Pass sessionDir via env + } + ); + + // Unref so parent can exit + child.unref(); + + // Phase 1: Wait for daemon to be validated (short timeout) + const daemonStartMaxAttempts = 10; // 10 * 50ms = 500ms + let attempts = 0; + + while (attempts < daemonStartMaxAttempts) { + await new Promise((resolve) => setTimeout(resolve, 50)); + attempts++; + + // Validate the session (checks metadata exists, process running, socket exists) + if (validateSession(sessionDir, targetSessionId)) { + // Daemon is validated and running + break; + } + } + + // Check if daemon started successfully after polling + if (!validateSession(sessionDir, targetSessionId)) { + throw new Error( + `Failed to start daemon: session not validated after ${daemonStartMaxAttempts * 50}ms` + ); + } + + // Phase 2: Wait for profile to load by checking status (longer timeout) + const profileLoadMaxAttempts = 600; // 600 * 100ms = 60 seconds + attempts = 0; + + while (attempts < profileLoadMaxAttempts) { + await new Promise((resolve) => setTimeout(resolve, 100)); + attempts++; + + try { + const response = await sendStatusMessage(sessionDir, targetSessionId); + + switch (response.type) { + case 'ready': + // Profile loaded successfully + return targetSessionId; + + case 'loading': + // Still loading, keep waiting + continue; + + case 'error': + // Profile load failed, fail immediately + throw new Error(response.error); + + default: + // Unexpected response type + throw new Error( + `Unexpected response type: ${(response as any).type}` + ); + } + } catch (error) { + // Socket connection errors - daemon might still be setting up + // Keep retrying unless it's an explicit error response + if ( + error instanceof Error && + error.message.startsWith('Profile load failed') + ) { + throw error; + } + continue; + } + } + + // If we got here, profile load timed out + throw new Error( + `Profile load timeout after ${profileLoadMaxAttempts * 100}ms` + ); +} + +/** + * Stop a running daemon. 
+ */ +export async function stopDaemon( + sessionDir: string, + sessionId?: string +): Promise { + const resolvedSessionId = sessionId || getCurrentSessionId(sessionDir); + + if (!resolvedSessionId) { + throw new Error('No active session to stop.'); + } + + // Send shutdown command + try { + await sendMessage(sessionDir, { type: 'shutdown' }, resolvedSessionId); + } catch (error) { + // If the daemon is already dead, that's fine + console.error(`Note: ${error}`); + } + + // Wait a bit for cleanup + await new Promise((resolve) => setTimeout(resolve, 500)); + + console.log(`Session ${resolvedSessionId} stopped`); +} diff --git a/src/profile-query-cli/constants.ts b/src/profile-query-cli/constants.ts new file mode 100644 index 0000000000..3dbb385995 --- /dev/null +++ b/src/profile-query-cli/constants.ts @@ -0,0 +1,12 @@ +/** + * Build-time constants injected by webpack. + */ + +// This global is defined by webpack.DefinePlugin +declare const __BUILD_HASH__: string; + +/** + * Unique hash for this build, used to detect version mismatches + * between client and daemon. + */ +export const BUILD_HASH = __BUILD_HASH__; diff --git a/src/profile-query-cli/daemon.ts b/src/profile-query-cli/daemon.ts new file mode 100644 index 0000000000..acc6f1a156 --- /dev/null +++ b/src/profile-query-cli/daemon.ts @@ -0,0 +1,412 @@ +/** + * Daemon process for pq. + * Loads a profile and listens for commands on a Unix socket. + */ + +import * as net from 'net'; +import * as fs from 'fs'; +import { ProfileQuerier } from '../profile-query'; +import type { + ClientCommand, + ClientMessage, + ServerResponse, + SessionMetadata, + CommandResult, +} from './protocol'; +import { + generateSessionId, + getSocketPath, + getLogPath, + saveSessionMetadata, + setCurrentSession, + cleanupSession, + ensureSessionDir, +} from './session'; +import { assertExhaustiveCheck } from 'firefox-profiler/utils/types'; +import { BUILD_HASH } from './constants'; + +export class Daemon { + private querier: ProfileQuerier | null = null; + private server: net.Server | null = null; + private sessionDir: string; + private sessionId: string; + private socketPath: string; + private logPath: string; + private logStream: fs.WriteStream; + private profilePath: string; + private loadingProfile: boolean = false; + private profileLoadError: Error | null = null; + + constructor(sessionDir: string, profilePath: string, sessionId?: string) { + this.sessionDir = sessionDir; + this.profilePath = profilePath; + this.sessionId = sessionId || generateSessionId(); + this.socketPath = getSocketPath(sessionDir, this.sessionId); + this.logPath = getLogPath(sessionDir, this.sessionId); + this.logStream = fs.createWriteStream(this.logPath, { flags: 'a' }); + + // Redirect console to log file + this.redirectConsole(); + + // Handle shutdown signals + process.on('SIGINT', () => this.shutdown('SIGINT')); + process.on('SIGTERM', () => this.shutdown('SIGTERM')); + } + + private redirectConsole(): void { + const originalConsoleLog = console.log; + const originalConsoleError = console.error; + const originalConsoleWarn = console.warn; + + console.log = (...args: any[]) => { + const message = args.map((arg) => String(arg)).join(' '); + this.logStream.write(`[LOG] ${new Date().toISOString()} ${message}\n`); + originalConsoleLog(...args); + }; + + console.error = (...args: any[]) => { + const message = args.map((arg) => String(arg)).join(' '); + this.logStream.write(`[ERROR] ${new Date().toISOString()} ${message}\n`); + originalConsoleError(...args); + }; + + console.warn = 
(...args: any[]) => { + const message = args.map((arg) => String(arg)).join(' '); + this.logStream.write(`[WARN] ${new Date().toISOString()} ${message}\n`); + originalConsoleWarn(...args); + }; + } + + async start(): Promise { + try { + console.log(`Starting daemon for session ${this.sessionId}`); + console.log(`Profile path: ${this.profilePath}`); + console.log(`Socket path: ${this.socketPath}`); + console.log(`Log path: ${this.logPath}`); + + // Ensure session directory exists + ensureSessionDir(this.sessionDir); + + // Create Unix socket server BEFORE loading the profile + this.server = net.createServer((socket) => this.handleConnection(socket)); + + // Remove stale socket if it exists + if (fs.existsSync(this.socketPath)) { + fs.unlinkSync(this.socketPath); + } + + this.server.listen(this.socketPath, () => { + console.log(`Daemon listening on ${this.socketPath}`); + + // Save session metadata immediately + const metadata: SessionMetadata = { + id: this.sessionId, + socketPath: this.socketPath, + logPath: this.logPath, + pid: process.pid, + profilePath: this.profilePath, + createdAt: new Date().toISOString(), + buildHash: BUILD_HASH, + }; + saveSessionMetadata(this.sessionDir, metadata); + setCurrentSession(this.sessionDir, this.sessionId); + + console.log('Daemon ready (socket listening)'); + + // Start loading the profile in the background + this.loadProfileAsync(); + }); + + this.server.on('error', (error) => { + console.error(`Server error: ${error}`); + this.shutdown('error'); + }); + } catch (error) { + console.error(`Failed to start daemon: ${error}`); + process.exit(1); + } + } + + private async loadProfileAsync(): Promise { + this.loadingProfile = true; + try { + console.log('Loading profile...'); + this.querier = await ProfileQuerier.load(this.profilePath); + console.log('Profile loaded successfully'); + this.loadingProfile = false; + } catch (error) { + console.error(`Failed to load profile: ${error}`); + this.profileLoadError = + error instanceof Error ? 
error : new Error(String(error)); + this.loadingProfile = false; + } + } + + private handleConnection(socket: net.Socket): void { + console.log('Client connected'); + + let buffer = ''; + + socket.on('data', (data) => { + buffer += data.toString(); + + // Process complete lines + let newlineIndex: number; + while ((newlineIndex = buffer.indexOf('\n')) !== -1) { + const line = buffer.substring(0, newlineIndex); + buffer = buffer.substring(newlineIndex + 1); + + if (line.trim()) { + this.handleMessage(line, socket); + } + } + }); + + socket.on('error', (error) => { + console.error(`Socket error: ${error}`); + }); + + socket.on('end', () => { + console.log('Client disconnected'); + }); + } + + private handleMessage(line: string, socket: net.Socket): void { + try { + const message = JSON.parse(line) as ClientMessage; + console.log(`Received message: ${message.type}`); + + this.processMessage(message) + .then((response) => { + socket.write(JSON.stringify(response) + '\n'); + }) + .catch((error) => { + const errorResponse: ServerResponse = { + type: 'error', + error: String(error), + }; + socket.write(JSON.stringify(errorResponse) + '\n'); + }); + } catch (error) { + console.error(`Failed to parse message: ${error}`); + const errorResponse: ServerResponse = { + type: 'error', + error: `Failed to parse message: ${error}`, + }; + socket.write(JSON.stringify(errorResponse) + '\n'); + } + } + + private async processMessage( + message: ClientMessage + ): Promise { + switch (message.type) { + case 'status': { + // Return current daemon state + if (this.profileLoadError) { + return { + type: 'error', + error: `Profile load failed: ${this.profileLoadError.message}`, + }; + } + if (this.loadingProfile) { + return { type: 'loading' }; + } + if (this.querier) { + return { type: 'ready' }; + } + // Shouldn't happen, but handle gracefully + return { + type: 'error', + error: 'Profile not loaded', + }; + } + + case 'shutdown': { + console.log('Shutdown command received'); + // Send response before shutting down + const response: ServerResponse = { + type: 'success', + result: 'Shutting down', + }; + setImmediate(() => this.shutdown('command')); + return response; + } + + case 'command': { + // Commands require profile to be loaded + if (this.profileLoadError) { + return { + type: 'error', + error: `Profile load failed: ${this.profileLoadError.message}`, + }; + } + if (this.loadingProfile) { + return { + type: 'error', + error: 'Profile still loading, try again shortly', + }; + } + if (!this.querier) { + return { + type: 'error', + error: 'Profile not loaded', + }; + } + + const result = await this.processCommand(message.command); + return { + type: 'success', + result, + }; + } + + default: { + return { + type: 'error', + error: `Unknown message type: ${(message as any).type}`, + }; + } + } + } + + private async processCommand( + command: ClientCommand + ): Promise { + switch (command.command) { + case 'profile': + switch (command.subcommand) { + case 'info': + return this.querier!.profileInfo(); + case 'threads': + throw new Error('unimplemented'); + default: + throw assertExhaustiveCheck(command); + } + case 'thread': + switch (command.subcommand) { + case 'info': + return this.querier!.threadInfo(command.thread); + case 'select': + if (!command.thread) { + throw new Error('thread handle required for thread select'); + } + return this.querier!.threadSelect(command.thread); + case 'samples': + return this.querier!.threadSamples(command.thread); + case 'samples-top-down': + return 
this.querier!.threadSamplesTopDown( + command.thread, + command.callTreeOptions + ); + case 'samples-bottom-up': + return this.querier!.threadSamplesBottomUp( + command.thread, + command.callTreeOptions + ); + case 'markers': + return this.querier!.threadMarkers( + command.thread, + command.markerFilters + ); + case 'functions': + return this.querier!.threadFunctions( + command.thread, + command.functionFilters + ); + default: + throw assertExhaustiveCheck(command); + } + case 'marker': + switch (command.subcommand) { + case 'info': + if (!command.marker) { + throw new Error('marker handle required for marker info'); + } + return this.querier!.markerInfo(command.marker); + case 'stack': + if (!command.marker) { + throw new Error('marker handle required for marker stack'); + } + return this.querier!.markerStack(command.marker); + case 'select': + throw new Error('unimplemented'); + default: + throw assertExhaustiveCheck(command); + } + case 'sample': + switch (command.subcommand) { + case 'info': + throw new Error('unimplemented'); + case 'select': + throw new Error('unimplemented'); + default: + throw assertExhaustiveCheck(command); + } + case 'function': + switch (command.subcommand) { + case 'info': + if (!command.function) { + throw new Error('function handle required for function info'); + } + return this.querier!.functionInfo(command.function); + case 'expand': + if (!command.function) { + throw new Error('function handle required for function expand'); + } + return this.querier!.functionExpand(command.function); + case 'select': + throw new Error('unimplemented'); + default: + throw assertExhaustiveCheck(command); + } + case 'zoom': + switch (command.subcommand) { + case 'push': + if (!command.range) { + throw new Error('range parameter is required for zoom push'); + } + return this.querier!.pushViewRange(command.range); + case 'pop': + return this.querier!.popViewRange(); + case 'clear': + return this.querier!.clearViewRange(); + default: + throw assertExhaustiveCheck(command); + } + case 'status': + return this.querier!.getStatus(); + default: + throw assertExhaustiveCheck(command); + } + } + + private shutdown(reason: string): void { + console.log(`Shutting down daemon (reason: ${reason})`); + + if (this.server) { + this.server.close(); + } + + cleanupSession(this.sessionDir, this.sessionId); + + if (this.logStream) { + this.logStream.end(); + } + + console.log('Daemon stopped'); + process.exit(0); + } +} + +/** + * Start the daemon (called from CLI). + */ +export async function startDaemon( + sessionDir: string, + profilePath: string, + sessionId?: string +): Promise { + const daemon = new Daemon(sessionDir, profilePath, sessionId); + await daemon.start(); +} diff --git a/src/profile-query-cli/formatters.ts b/src/profile-query-cli/formatters.ts new file mode 100644 index 0000000000..76c693b193 --- /dev/null +++ b/src/profile-query-cli/formatters.ts @@ -0,0 +1,859 @@ +/** + * Text formatters for CommandResult types. + * These functions convert structured JSON results into human-readable text output. 
+ */ + +import type { + StatusResult, + SessionContext, + WithContext, + FunctionExpandResult, + FunctionInfoResult, + ViewRangeResult, + ThreadInfoResult, + MarkerStackResult, + MarkerInfoResult, + ProfileInfoResult, + ThreadSamplesResult, + ThreadSamplesTopDownResult, + ThreadSamplesBottomUpResult, + ThreadMarkersResult, + ThreadFunctionsResult, + MarkerGroupData, + CallTreeNode, +} from './protocol'; +import { truncateFunctionName } from '../profile-query/function-list'; + +/** + * Format a SessionContext as a compact header line. + * Shows current thread selection, zoom range, and full profile duration. + */ +export function formatContextHeader(context: SessionContext): string { + // Thread info + let threadInfo = 'No thread selected'; + if (context.selectedThreadHandle && context.selectedThreads.length > 0) { + if (context.selectedThreads.length === 1) { + const thread = context.selectedThreads[0]; + threadInfo = `${context.selectedThreadHandle} (${thread.name})`; + } else { + const names = context.selectedThreads + .map((t: { name: string }) => t.name) + .join(', '); + threadInfo = `${context.selectedThreadHandle} (${names})`; + } + } + + // View range info + const rootDuration = context.rootRange.end - context.rootRange.start; + const formatDuration = (ms: number): string => { + if (ms < 1000) { + return `${ms.toFixed(1)}ms`; + } + return `${(ms / 1000).toFixed(2)}s`; + }; + + let viewInfo = 'Full profile'; + if (context.currentViewRange) { + const range = context.currentViewRange; + const rangeDuration = range.end - range.start; + viewInfo = `${range.startName}→${range.endName} (${formatDuration(rangeDuration)})`; + } + + const fullInfo = formatDuration(rootDuration); + + return `[Thread: ${threadInfo} | View: ${viewInfo} | Full: ${fullInfo}]`; +} + +/** + * Format a StatusResult as plain text. + */ +export function formatStatusResult(result: StatusResult): string { + let threadInfo = 'No thread selected'; + if (result.selectedThreadHandle && result.selectedThreads.length > 0) { + if (result.selectedThreads.length === 1) { + const thread = result.selectedThreads[0]; + threadInfo = `${result.selectedThreadHandle} (${thread.name})`; + } else { + const names = result.selectedThreads.map((t) => t.name).join(', '); + threadInfo = `${result.selectedThreadHandle} (${names})`; + } + } + + let rangesInfo = 'Full profile'; + if (result.viewRanges.length > 0) { + const rangeStrs = result.viewRanges.map((range) => { + return `${range.startName} to ${range.endName}`; + }); + rangesInfo = rangeStrs.join(' > '); + } + + return `\ +Session Status: + Selected thread: ${threadInfo} + View range: ${rangesInfo}`; +} + +/** + * Format a FunctionExpandResult as plain text. + */ +export function formatFunctionExpandResult( + result: WithContext +): string { + const contextHeader = formatContextHeader(result.context); + return `${contextHeader} + +Function ${result.functionHandle} (thread ${result.threadHandle}): +${result.fullName}`; +} + +/** + * Format a FunctionInfoResult as plain text. 
+ */ +export function formatFunctionInfoResult( + result: WithContext +): string { + const contextHeader = formatContextHeader(result.context); + let output = `${contextHeader} + +Function ${result.functionHandle}: + Thread: ${result.threadHandle} (${result.threadName}) + Full name: ${result.fullName} + Short name: ${result.name} + Is JS: ${result.isJS} + Relevant for JS: ${result.relevantForJS}`; + + if (result.resource) { + output += `\n Resource: ${result.resource.name}`; + } + + if (result.library) { + output += `\n Library: ${result.library.name}`; + output += `\n Library path: ${result.library.path}`; + if (result.library.debugName) { + output += `\n Debug name: ${result.library.debugName}`; + } + if (result.library.debugPath) { + output += `\n Debug path: ${result.library.debugPath}`; + } + if (result.library.breakpadId) { + output += `\n Breakpad ID: ${result.library.breakpadId}`; + } + } + + return output; +} + +/** + * Format a ViewRangeResult as plain text. + */ +export function formatViewRangeResult(result: ViewRangeResult): string { + // Start with the basic message + let output = result.message; + + // For 'push' action, add enhanced information if available + if (result.action === 'push' && result.duration !== undefined) { + output += ` (duration: ${formatDuration(result.duration)})`; + + // If this is a marker zoom, show marker details + if (result.markerInfo) { + output += `\n Zoomed to: Marker ${result.markerInfo.markerHandle} - ${result.markerInfo.markerName}`; + output += `\n Thread: ${result.markerInfo.threadHandle} (${result.markerInfo.threadName})`; + } + + // Show zoom depth if available + if (result.zoomDepth !== undefined) { + output += `\n Zoom depth: ${result.zoomDepth}${result.zoomDepth > 1 ? ' (use "pq zoom pop" to go back)' : ''}`; + } + } + + return output; +} + +/** + * Format a ThreadInfoResult as plain text. + */ +export function formatThreadInfoResult( + result: WithContext +): string { + const contextHeader = formatContextHeader(result.context); + const endedAtStr = result.endedAtName || 'still alive at end of recording'; + + let output = `${contextHeader} + +Name: ${result.friendlyName} +Created at: ${result.createdAtName} +Ended at: ${endedAtStr} + +This thread contains ${result.sampleCount} samples and ${result.markerCount} markers. + +CPU activity over time:`; + + if (result.cpuActivity && result.cpuActivity.length > 0) { + for (const activity of result.cpuActivity) { + const indent = ' '.repeat(activity.depthLevel); + const percentage = Math.round( + (activity.cpuMs / (activity.endTime - activity.startTime)) * 100 + ); + output += `\n${indent}- ${percentage}% for ${activity.cpuMs.toFixed(1)}ms: [${activity.startTimeName} → ${activity.endTimeName}] (${activity.startTimeStr} - ${activity.endTimeStr})`; + } + } else { + output += '\nNo significant activity.'; + } + + return output; +} + +/** + * Format a MarkerStackResult as plain text. 
+ */ +export function formatMarkerStackResult( + result: WithContext +): string { + const contextHeader = formatContextHeader(result.context); + let output = `${contextHeader} + +Stack trace for marker ${result.markerHandle}: ${result.markerName}\n`; + output += `Thread: ${result.threadHandle} (${result.friendlyThreadName})`; + + if (!result.stack || result.stack.frames.length === 0) { + return output + '\n\n(This marker has no stack trace)'; + } + + if (result.stack.capturedAt !== undefined) { + output += `\nCaptured at: ${result.stack.capturedAt.toFixed(3)}ms\n`; + } + + for (let i = 0; i < result.stack.frames.length; i++) { + const frame = result.stack.frames[i]; + output += `\n [${i + 1}] ${frame.nameWithLibrary}`; + } + + if (result.stack.truncated) { + output += '\n ... (truncated)'; + } + + return output; +} + +/** + * Format a MarkerInfoResult as plain text. + */ +export function formatMarkerInfoResult( + result: WithContext +): string { + const contextHeader = formatContextHeader(result.context); + let output = `${contextHeader} + +Marker ${result.markerHandle}: ${result.name}`; + if (result.tooltipLabel) { + output += ` - ${result.tooltipLabel}`; + } + output += '\n\n'; + + // Basic info + output += `Type: ${result.markerType ?? 'None'}\n`; + output += `Category: ${result.category.name}\n`; + + // Time and duration + const startStr = result.start.toFixed(3); + if (result.end !== null) { + const endStr = result.end.toFixed(3); + const durationMs = result.duration!; + let durationStr: string; + if (durationMs < 1) { + durationStr = `${(durationMs * 1000).toFixed(1)}µs`; + } else if (durationMs < 1000) { + durationStr = `${durationMs.toFixed(2)}ms`; + } else { + durationStr = `${(durationMs / 1000).toFixed(3)}s`; + } + output += `Time: ${startStr}ms - ${endStr}ms (${durationStr})\n`; + } else { + output += `Time: ${startStr}ms (instant)\n`; + } + + output += `Thread: ${result.threadHandle} (${result.friendlyThreadName})\n`; + + // Marker data fields + if (result.fields && result.fields.length > 0) { + output += '\nFields:\n'; + for (const field of result.fields) { + output += ` ${field.label}: ${field.formattedValue}\n`; + } + } + + // Schema description + if (result.schema?.description) { + output += '\nDescription:\n'; + output += ` ${result.schema.description}\n`; + } + + // Stack trace (truncated to 20 frames) + if (result.stack && result.stack.frames.length > 0) { + output += '\nStack trace:\n'; + if (result.stack.capturedAt !== undefined) { + output += ` Captured at: ${result.stack.capturedAt.toFixed(3)}ms\n`; + } + + for (let i = 0; i < result.stack.frames.length; i++) { + const frame = result.stack.frames[i]; + output += ` [${i + 1}] ${frame.nameWithLibrary}\n`; + } + + if (result.stack.truncated) { + output += `\nUse 'pq marker stack ${result.markerHandle}' for the full stack trace.\n`; + } + } + + return output; +} + +/** + * Format a ProfileInfoResult as plain text. 
+ */ +export function formatProfileInfoResult( + result: WithContext +): string { + const contextHeader = formatContextHeader(result.context); + let output = `${contextHeader} + +Name: ${result.name}\n`; + output += `Platform: ${result.platform}\n\n`; + output += `This profile contains ${result.threadCount} threads across ${result.processCount} processes.\n`; + + if (result.processes.length === 0) { + output += '\n(CPU time information not available)'; + return output; + } + + output += '\nTop processes and threads by CPU usage:\n'; + + for (const process of result.processes) { + // Format process timing information + let timingInfo = ''; + if (process.startTime !== undefined && process.startTimeName) { + if (process.endTime !== null && process.endTimeName !== null) { + timingInfo = ` [${process.startTimeName} → ${process.endTimeName}]`; + } else { + timingInfo = ` [${process.startTimeName} → end]`; + } + } + + output += ` p-${process.processIndex}: ${process.name} [pid ${process.pid}]${timingInfo} - ${process.cpuMs.toFixed(3)}ms\n`; + + for (const thread of process.threads) { + output += ` ${thread.threadHandle}: ${thread.name} - ${thread.cpuMs.toFixed(3)}ms\n`; + } + + if (process.remainingThreads) { + output += ` + ${process.remainingThreads.count} more threads with combined CPU time ${process.remainingThreads.combinedCpuMs.toFixed(3)}ms and max CPU time ${process.remainingThreads.maxCpuMs.toFixed(3)}ms\n`; + } + } + + if (result.remainingProcesses) { + output += ` + ${result.remainingProcesses.count} more processes with combined CPU time ${result.remainingProcesses.combinedCpuMs.toFixed(3)}ms and max CPU time ${result.remainingProcesses.maxCpuMs.toFixed(3)}ms\n`; + } + + output += '\nCPU activity over time:\n'; + + if (result.cpuActivity && result.cpuActivity.length > 0) { + for (const activity of result.cpuActivity) { + const indent = ' '.repeat(activity.depthLevel); + const percentage = Math.round( + (activity.cpuMs / (activity.endTime - activity.startTime)) * 100 + ); + output += `${indent}- ${percentage}% for ${activity.cpuMs.toFixed(1)}ms: [${activity.startTimeName} → ${activity.endTimeName}] (${activity.startTimeStr} - ${activity.endTimeStr})\n`; + } + } else { + output += 'No significant activity.\n'; + } + + return output; +} + +/** + * Helper function to format a call tree node recursively. + * + * This formatter uses a "stack fragment" approach for single-child chains: + * - Root-level nodes always indent their children with tree symbols + * - Single-child continuations are rendered without tree symbols (as stack fragments) + * - Only nodes with multiple children use tree symbols to show branching + */ +function formatCallTreeNode( + node: CallTreeNode, + baseIndent: string, + useTreeSymbol: boolean, + isLastSibling: boolean, + depth: number, + lines: string[] +): void { + const totalPct = node.totalPercentage.toFixed(1); + const selfPct = node.selfPercentage.toFixed(1); + const displayName = truncateFunctionName(node.nameWithLibrary, 120); + + // Build the line prefix + let linePrefix: string; + if (useTreeSymbol) { + const symbol = isLastSibling ? '└─ ' : '├─ '; + linePrefix = baseIndent + symbol; + } else { + linePrefix = baseIndent; + } + + // Add function handle prefix if available + const handlePrefix = node.functionHandle ? `${node.functionHandle}. 
` : ''; + + lines.push( + `${linePrefix}${handlePrefix}${displayName} [total: ${totalPct}%, self: ${selfPct}%]` + ); + + // Handle children and truncation + const hasChildren = node.children && node.children.length > 0; + const hasTruncatedChildren = node.childrenTruncated; + + if (hasChildren || hasTruncatedChildren) { + // Calculate the base indent for children + let childBaseIndent: string; + if (useTreeSymbol) { + // We used a tree symbol, so children need appropriate spine continuation + const spine = isLastSibling ? ' ' : '│ '; + childBaseIndent = baseIndent + spine; + } else { + // We didn't use a tree symbol (stack fragment), children keep the same base indent + childBaseIndent = baseIndent; + } + + if (hasChildren) { + const hasMultipleChildren = + node.children.length > 1 || !!hasTruncatedChildren; + + for (let i = 0; i < node.children.length; i++) { + const child = node.children[i]; + const isLast = i === node.children.length - 1 && !hasTruncatedChildren; + + // Children use tree symbols if: + // - There are multiple children (branching), OR + // - We're at root level (depth 0) - root children always get tree symbols + const childUsesTreeSymbol = hasMultipleChildren || depth === 0; + + formatCallTreeNode( + child, + childBaseIndent, + childUsesTreeSymbol, + isLast, + depth + 1, + lines + ); + } + } + + // Show combined elision info if children were omitted or depth limit reached + // Combine both types of elision into a single marker + if (hasTruncatedChildren) { + const truncPrefix = childBaseIndent + '└─ '; + const truncInfo = node.childrenTruncated!; + const combinedPct = truncInfo.combinedPercentage.toFixed(1); + const maxPct = truncInfo.maxPercentage.toFixed(1); + lines.push( + `${truncPrefix}... (${truncInfo.count} more children: combined ${combinedPct}%, max ${maxPct}%)` + ); + } + } +} + +/** + * Helper function to format a call tree. + */ +function formatCallTree(tree: CallTreeNode, title: string): string { + const lines: string[] = [`${title} Call Tree:`]; + + // The root node is virtual, so format its children + if (tree.children && tree.children.length > 0) { + for (let i = 0; i < tree.children.length; i++) { + const child = tree.children[i]; + const isLast = i === tree.children.length - 1; + // Root-level nodes don't use tree symbols (they are the starting points) + formatCallTreeNode(child, '', false, isLast, 0, lines); + } + } + + return lines.join('\n'); +} + +/** + * Format a ThreadSamplesResult as plain text. + */ +export function formatThreadSamplesResult( + result: WithContext +): string { + const contextHeader = formatContextHeader(result.context); + let output = `${contextHeader} + +Thread: ${result.friendlyThreadName}\n\n`; + + // Top functions by total time + output += 'Top Functions (by total time):\n'; + output += + ' (For a call tree starting from these functions, use: pq thread samples-top-down)\n\n'; + for (const func of result.topFunctionsByTotal) { + const totalCount = Math.round(func.totalSamples); + const totalPct = func.totalPercentage.toFixed(1); + const displayName = truncateFunctionName(func.nameWithLibrary, 120); + output += ` ${func.functionHandle}. 
${displayName} - total: ${totalCount} (${totalPct}%)\n`; + } + + output += '\n'; + + // Top functions by self time + output += 'Top Functions (by self time):\n'; + output += + ' (For a call tree showing what calls these functions, use: pq thread samples-bottom-up)\n\n'; + for (const func of result.topFunctionsBySelf) { + const selfCount = Math.round(func.selfSamples); + const selfPct = func.selfPercentage.toFixed(1); + const displayName = truncateFunctionName(func.nameWithLibrary, 120); + output += ` ${func.functionHandle}. ${displayName} - self: ${selfCount} (${selfPct}%)\n`; + } + + output += '\n'; + + // Heaviest stack + const stack = result.heaviestStack; + output += `Heaviest stack (${stack.selfSamples.toFixed(1)} samples, ${stack.frameCount} frames):\n`; + + if (stack.frames.length === 0) { + output += ' (empty)\n'; + } else if (stack.frameCount <= 200) { + // Show all frames + for (let i = 0; i < stack.frames.length; i++) { + const frame = stack.frames[i]; + const displayName = truncateFunctionName(frame.nameWithLibrary, 120); + const totalCount = Math.round(frame.totalSamples); + const totalPct = frame.totalPercentage.toFixed(1); + const selfCount = Math.round(frame.selfSamples); + const selfPct = frame.selfPercentage.toFixed(1); + output += ` ${i + 1}. ${displayName} - total: ${totalCount} (${totalPct}%), self: ${selfCount} (${selfPct}%)\n`; + } + } else { + // Show first 100 + for (let i = 0; i < 100; i++) { + const frame = stack.frames[i]; + const displayName = truncateFunctionName(frame.nameWithLibrary, 120); + const totalCount = Math.round(frame.totalSamples); + const totalPct = frame.totalPercentage.toFixed(1); + const selfCount = Math.round(frame.selfSamples); + const selfPct = frame.selfPercentage.toFixed(1); + output += ` ${i + 1}. ${displayName} - total: ${totalCount} (${totalPct}%), self: ${selfCount} (${selfPct}%)\n`; + } + + // Show placeholder for skipped frames + const skippedCount = stack.frameCount - 200; + output += ` ... (${skippedCount} frames skipped)\n`; + + // Show last 100 + for (let i = stack.frameCount - 100; i < stack.frameCount; i++) { + const frame = stack.frames[i]; + const displayName = truncateFunctionName(frame.nameWithLibrary, 120); + const totalCount = Math.round(frame.totalSamples); + const totalPct = frame.totalPercentage.toFixed(1); + const selfCount = Math.round(frame.selfSamples); + const selfPct = frame.selfPercentage.toFixed(1); + output += ` ${i + 1}. ${displayName} - total: ${totalCount} (${totalPct}%), self: ${selfCount} (${selfPct}%)\n`; + } + } + + return output; +} + +/** + * Format a ThreadSamplesTopDownResult as plain text. + */ +export function formatThreadSamplesTopDownResult( + result: WithContext +): string { + const contextHeader = formatContextHeader(result.context); + let output = `${contextHeader} + +Thread: ${result.friendlyThreadName}\n\n`; + + // Top-down call tree + output += formatCallTree(result.regularCallTree, 'Top-Down'); + + return output; +} + +/** + * Format a ThreadSamplesBottomUpResult as plain text. 
+ */ +export function formatThreadSamplesBottomUpResult( + result: WithContext +): string { + const contextHeader = formatContextHeader(result.context); + let output = `${contextHeader} + +Thread: ${result.friendlyThreadName}\n\n`; + + // Bottom-up call tree (inverted tree shows callers) + if (result.invertedCallTree) { + output += formatCallTree(result.invertedCallTree, 'Bottom-Up'); + } else { + output += 'Bottom-Up Call Tree:\n (unable to create bottom-up tree)'; + } + + return output; +} + +/** + * Format a ThreadMarkersResult as plain text. + */ +export function formatThreadMarkersResult( + result: WithContext +): string { + const contextHeader = formatContextHeader(result.context); + const lines: string[] = [contextHeader, '']; + + // Check if filters are active + const hasFilters = result.filters !== undefined; + const filterSuffix = + hasFilters && result.filteredMarkerCount !== result.totalMarkerCount + ? ` (filtered from ${result.totalMarkerCount})` + : ''; + + lines.push( + `Markers in thread ${result.threadHandle} (${result.friendlyThreadName}) — ${result.filteredMarkerCount} markers${filterSuffix}` + ); + lines.push('Legend: ✓ = has stack trace, ✗ = no stack trace\n'); + + if (result.filteredMarkerCount === 0) { + if (hasFilters) { + lines.push('No markers match the specified filters.'); + } else { + lines.push('No markers in this thread.'); + } + return lines.join('\n'); + } + + // Handle custom grouping if present + if (result.customGroups && result.customGroups.length > 0) { + formatMarkerGroupsForDisplay(lines, result.customGroups, 0); + } else { + // Default aggregation by marker name + lines.push('By Name (top 15):'); + const topTypes = result.byType.slice(0, 15); + for (const stats of topTypes) { + let line = ` ${stats.markerName.padEnd(25)} ${stats.count.toString().padStart(5)} markers`; + + if (stats.durationStats) { + const { min, avg, max } = stats.durationStats; + line += ` (interval: min=${formatDuration(min)}, avg=${formatDuration(avg)}, max=${formatDuration(max)})`; + } else { + line += ' (instant)'; + } + + lines.push(line); + + // Show top markers with handles (for easy inspection) + if (!stats.subGroups && stats.topMarkers.length > 0) { + const handleList = stats.topMarkers + .slice(0, 3) + .map((m) => { + const stackIndicator = m.hasStack ? '✓' : '✗'; + const handleWithIndicator = `${m.handle} ${stackIndicator}`; + if (m.duration !== undefined) { + return `${handleWithIndicator} (${formatDuration(m.duration)})`; + } + return handleWithIndicator; + }) + .join(', '); + lines.push(` Examples: ${handleList}`); + } + + // Show sub-groups if present (from auto-grouping) + if (stats.subGroups && stats.subGroups.length > 0) { + if (stats.subGroupKey) { + lines.push(` Grouped by ${stats.subGroupKey}:`); + } + formatMarkerGroupsForDisplay(lines, stats.subGroups, 2); + } + } + + if (result.byType.length > 15) { + lines.push(` ... 
(${result.byType.length - 15} more marker names)`); + } + + lines.push(''); + + // Aggregate by category + lines.push('By Category:'); + for (const stats of result.byCategory) { + lines.push( + ` ${stats.categoryName.padEnd(25)} ${stats.count.toString().padStart(5)} markers (${stats.percentage.toFixed(1)}%)` + ); + } + + lines.push(''); + + // Frequency analysis for top markers + lines.push('Frequency Analysis:'); + const topRateTypes = result.byType + .filter((s) => s.rateStats && s.rateStats.markersPerSecond > 0) + .slice(0, 5); + + for (const stats of topRateTypes) { + if (!stats.rateStats) continue; + const { markersPerSecond, minGap, avgGap, maxGap } = stats.rateStats; + lines.push( + ` ${stats.markerName}: ${markersPerSecond.toFixed(1)} markers/sec (interval: min=${formatDuration(minGap)}, avg=${formatDuration(avgGap)}, max=${formatDuration(maxGap)})` + ); + } + + lines.push(''); + } + + lines.push( + 'Use --search , --category , --min-duration , --max-duration , --has-stack, --limit , --group-by , or --auto-group to filter/group markers, or m- handles to inspect individual markers.' + ); + + return lines.join('\n'); +} + +/** + * Helper function to format marker groups hierarchically. + */ +function formatMarkerGroupsForDisplay( + lines: string[], + groups: MarkerGroupData[], + baseIndent: number +): void { + for (const group of groups) { + const indent = ' '.repeat(baseIndent); + let line = `${indent}${group.groupName}: ${group.count} markers`; + + if (group.durationStats) { + const { avg, max } = group.durationStats; + line += ` (avg=${formatDuration(avg)}, max=${formatDuration(max)})`; + } + + lines.push(line); + + // Show top markers if no sub-groups + if (!group.subGroups && group.topMarkers.length > 0) { + const handleList = group.topMarkers + .slice(0, 3) + .map((m) => { + const stackIndicator = m.hasStack ? '✓' : '✗'; + const handleWithIndicator = `${m.handle} ${stackIndicator}`; + if (m.duration !== undefined) { + return `${handleWithIndicator} (${formatDuration(m.duration)})`; + } + return handleWithIndicator; + }) + .join(', '); + lines.push(`${indent} Examples: ${handleList}`); + } + + // Recursively format sub-groups + if (group.subGroups && group.subGroups.length > 0) { + formatMarkerGroupsForDisplay(lines, group.subGroups, baseIndent + 1); + } + } +} + +/** + * Helper function to format duration in milliseconds. + */ +function formatDuration(ms: number): string { + if (ms < 1) { + return `${(ms * 1000).toFixed(0)}µs`; + } else if (ms < 1000) { + return `${ms.toFixed(2)}ms`; + } + return `${(ms / 1000).toFixed(2)}s`; +} + +/** + * Format a ThreadFunctionsResult as plain text. + */ +export function formatThreadFunctionsResult( + result: WithContext +): string { + const contextHeader = formatContextHeader(result.context); + const lines: string[] = [contextHeader, '']; + + // Check if filters are active + const hasFilters = result.filters !== undefined; + const filterSuffix = + hasFilters && result.filteredFunctionCount !== result.totalFunctionCount + ? 
` (filtered from ${result.totalFunctionCount})` + : ''; + + lines.push( + `Functions in thread ${result.threadHandle} (${result.friendlyThreadName}) — ${result.filteredFunctionCount} functions${filterSuffix}\n` + ); + + if (result.filteredFunctionCount === 0) { + if (hasFilters) { + lines.push('No functions match the specified filters.'); + } else { + lines.push('No functions in this thread.'); + } + return lines.join('\n'); + } + + // Show active filters if any + if (hasFilters && result.filters) { + const filterParts: string[] = []; + if (result.filters.searchString) { + filterParts.push(`search: "${result.filters.searchString}"`); + } + if (result.filters.minSelf !== undefined) { + filterParts.push(`min-self: ${result.filters.minSelf}%`); + } + if (result.filters.limit !== undefined) { + filterParts.push(`limit: ${result.filters.limit}`); + } + if (filterParts.length > 0) { + lines.push(`Filters: ${filterParts.join(', ')}\n`); + } + } + + // List functions sorted by self time + lines.push('Functions (by self time):'); + for (const func of result.functions) { + const selfCount = Math.round(func.selfSamples); + const totalCount = Math.round(func.totalSamples); + const displayName = truncateFunctionName(func.nameWithLibrary, 120); + + // Format percentages: show dual percentages when zoomed + let selfPctStr: string; + let totalPctStr: string; + if ( + func.fullSelfPercentage !== undefined && + func.fullTotalPercentage !== undefined + ) { + // Zoomed: show both view and full percentages + selfPctStr = `${func.selfPercentage.toFixed(1)}% of view, ${func.fullSelfPercentage.toFixed(1)}% of full`; + totalPctStr = `${func.totalPercentage.toFixed(1)}% of view, ${func.fullTotalPercentage.toFixed(1)}% of full`; + } else { + // Not zoomed: show single percentage + selfPctStr = `${func.selfPercentage.toFixed(1)}%`; + totalPctStr = `${func.totalPercentage.toFixed(1)}%`; + } + + lines.push( + ` ${func.functionHandle}. ${displayName} - self: ${selfCount} (${selfPctStr}), total: ${totalCount} (${totalPctStr})` + ); + } + + if (result.filteredFunctionCount > result.functions.length) { + const omittedCount = result.filteredFunctionCount - result.functions.length; + lines.push(`\n ... (${omittedCount} more functions omitted)`); + } + + lines.push(''); + lines.push( + 'Use --search , --min-self , or --limit to filter functions, or f- handles to inspect individual functions.' + ); + + return lines.join('\n'); +} diff --git a/src/profile-query-cli/index.ts b/src/profile-query-cli/index.ts new file mode 100644 index 0000000000..1e36595e7f --- /dev/null +++ b/src/profile-query-cli/index.ts @@ -0,0 +1,664 @@ +/** + * CLI entry point for pq (Profile Querier). 
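The percentage strings assembled above follow a single rule: one percentage normally, and a view/full pair when a zoom range is active. A minimal sketch of that rule (the helper name is illustrative and not part of this patch):

```ts
// Returns e.g. '12.5%' normally, or '12.5% of view, 3.2% of full' when the
// caller also knows the percentage relative to the un-zoomed profile.
function formatPercentage(viewPct: number, fullPct?: number): string {
  return fullPct === undefined
    ? `${viewPct.toFixed(1)}%`
    : `${viewPct.toFixed(1)}% of view, ${fullPct.toFixed(1)}% of full`;
}
```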
+ * + * Usage: + * pq load [--session ] Start a new daemon and load a profile + * pq profile info [--session ] Print profile summary + * pq thread info [--thread ] Print thread information + * pq thread samples [--thread ] Show thread call tree and top functions + * pq stop [--session ] [--all] Stop the daemon + * pq list-sessions List all running sessions + * + * Build: + * yarn build-profile-query-cli + * + * Run: + * pq (if pq is in PATH) + * ./dist/pq.js (direct invocation) + * + * Helper scripts: + * ./bin/pq-test Quick smoke test + * ./bin/pq-test-multi Test multiple concurrent sessions + * ./bin/pq-status Show session status + * ./bin/pq-clean Clean up sessions (--logs to also remove logs) + */ + +import * as path from 'path'; +import * as os from 'os'; +import minimist from 'minimist'; +import { startDaemon } from './daemon'; +import { sendCommand, startNewDaemon, stopDaemon } from './client'; +import { cleanupSession, listSessions, validateSession } from './session'; +import type { + MarkerFilterOptions, + FunctionFilterOptions, + CommandResult, + CallTreeCollectionOptions, + CallTreeScoringStrategy, +} from './protocol'; +import { + formatStatusResult, + formatFunctionExpandResult, + formatFunctionInfoResult, + formatViewRangeResult, + formatThreadInfoResult, + formatMarkerStackResult, + formatMarkerInfoResult, + formatProfileInfoResult, + formatThreadSamplesResult, + formatThreadSamplesTopDownResult, + formatThreadSamplesBottomUpResult, + formatThreadMarkersResult, + formatThreadFunctionsResult, +} from './formatters'; + +// Read session directory from environment (only place this is read) +const SESSION_DIR = + process.env.PQ_SESSION_DIR || path.join(os.homedir(), '.pq'); + +interface Args { + _: string[]; + session?: string; + daemon?: boolean; + help?: boolean; + h?: boolean; + json?: boolean; + 'max-lines'?: number; + scoring?: string; +} + +function printUsage(): void { + console.log(`Usage: pq [options] + +Commands: + load Load a profile and start a daemon session + profile info Print profile summary (processes, threads, CPU activity) + thread info Print detailed thread information + thread select Select a thread (e.g., t-0, t-1) + thread samples Show hot functions list for a thread + thread samples-top-down Show top-down call tree (where CPU time is spent) + thread samples-bottom-up Show bottom-up call tree (what calls hot functions) + thread markers List markers with aggregated statistics + thread functions List all functions with CPU percentages + marker info Show detailed marker information (e.g., m-1234) + marker stack Show full stack trace for a marker (e.g., m-1234) + function expand Show full untruncated function name (e.g., f-1) + function info Show detailed function information + zoom push Push a zoom range (e.g., 2.7,3.1 or ts-g,ts-G or m-158) + zoom pop Pop the most recent zoom range + zoom clear Clear all zoom ranges (return to full profile) + status Show session status (selected thread, zoom ranges) + stop Stop the daemon session + list-sessions List all running daemon sessions + +Options: + --session Use a specific session (default: current session) + --thread Specify thread by handle (e.g., t-0, t-1) + --marker Specify marker by handle (e.g., m-1, m-2) + --function Specify function by handle (e.g., f-1, f-2) + --search Search/filter by substring (for 'thread markers' and 'thread functions') + --category Filter markers by category name (case-insensitive substring match) + --min-duration Filter markers by minimum duration in milliseconds + --max-duration 
Filter markers by maximum duration in milliseconds + --min-self Filter functions by minimum self time percentage (for 'thread functions') + --has-stack Filter to show only markers with stack traces + --limit Limit the number of results shown + --group-by Group markers by custom keys (e.g., "type,name" or "type,field:eventType") + --auto-group Automatically determine grouping based on field variance + --max-lines Maximum nodes in call tree (for 'samples-top-down'/'samples-bottom-up', default: 100) + --scoring Call tree scoring: exponential-0.95, exponential-0.9 (default), exponential-0.8, + harmonic-0.1, harmonic-0.5, harmonic-1.0, percentage-only + --json Output results as JSON (for use with jq, etc.) + --help, -h Show this help message + +Examples: + pq load profile.json.gz + pq profile info + pq thread info --thread t-0 + pq thread samples + pq thread markers + pq thread functions + pq thread functions --search Present + pq thread functions --min-self 1 + pq thread functions --limit 50 + pq thread markers --search DOMEvent + pq thread markers --category Graphics + pq thread markers --min-duration 10 + pq thread markers --max-duration 100 + pq thread markers --has-stack + pq thread markers --limit 1000 + pq thread markers --category Layout --min-duration 5 + pq thread markers --search Reflow --min-duration 5 --max-duration 50 + pq thread markers --has-stack --category Other --limit 500 + pq thread markers --group-by type,name + pq thread markers --group-by type,field:eventType + pq thread markers --auto-group + pq thread markers --search DOMEvent --group-by field:eventType + pq marker info m-1234 + pq marker stack m-1234 + pq function expand f-12 + pq function info f-12 + pq zoom push 2.7,3.1 + pq zoom push m-158 + pq zoom pop + pq zoom clear + pq status + pq stop + pq list-sessions + pq thread samples-top-down --max-lines 50 + pq thread samples-top-down --scoring exponential-0.8 + pq thread samples-bottom-up --max-lines 200 --scoring harmonic-1.0 +`); +} + +/** + * Format command result for output. + * If json flag is set, output JSON. Otherwise output as string. 
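For reference, with `--json` every command prints a JSON object carrying a `type` discriminant, so the two result shapes look like this on stdout (values are illustrative):

```ts
// A legacy string result is wrapped before serialization:
const wrapped = JSON.stringify(
  { type: 'text', result: 'zoom range pushed' }, // illustrative legacy string
  null,
  2
);
// wrapped ===
// {
//   "type": "text",
//   "result": "zoom range pushed"
// }
//
// Structured results (StatusResult, ProfileInfoResult, ...) already carry
// their own `type` field, e.g. "status", and are serialized unchanged.
```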
+ */ +function formatOutput( + result: string | CommandResult, + jsonFlag: boolean +): string { + if (jsonFlag) { + if (typeof result === 'string') { + // Legacy string result - wrap in a simple JSON structure + return JSON.stringify({ type: 'text', result }, null, 2); + } + // Structured result - output as JSON + return JSON.stringify(result, null, 2); + } + + // Plain text output + if (typeof result === 'string') { + return result; + } + + // Format structured results as plain text + switch (result.type) { + case 'status': + return formatStatusResult(result); + case 'function-expand': + return formatFunctionExpandResult(result); + case 'function-info': + return formatFunctionInfoResult(result); + case 'view-range': + return formatViewRangeResult(result); + case 'thread-info': + return formatThreadInfoResult(result); + case 'marker-stack': + return formatMarkerStackResult(result); + case 'marker-info': + return formatMarkerInfoResult(result); + case 'profile-info': + return formatProfileInfoResult(result); + case 'thread-samples': + return formatThreadSamplesResult(result); + case 'thread-samples-top-down': + return formatThreadSamplesTopDownResult(result); + case 'thread-samples-bottom-up': + return formatThreadSamplesBottomUpResult(result); + case 'thread-markers': + return formatThreadMarkersResult(result); + case 'thread-functions': + return formatThreadFunctionsResult(result); + default: + // For types without formatters yet, fall back to JSON + return JSON.stringify(result, null, 2); + } +} + +async function main(): Promise { + const argv = minimist(process.argv.slice(2), { + string: [ + 'session', + 'thread', + 'marker', + 'sample', + 'function', + 'search', + 'min-duration', + 'max-duration', + 'min-self', + 'category', + 'limit', + 'group-by', + 'max-lines', + 'scoring', + ], + boolean: ['daemon', 'help', 'h', 'all', 'has-stack', 'auto-group', 'json'], + alias: { h: 'help' }, + }); + + // Check for help flag + if (argv.help || argv.h) { + printUsage(); + process.exit(0); + } + + // Internal flag: running as daemon + if (argv.daemon) { + const profilePath = argv._[0]; + if (!profilePath) { + console.error('Error: Profile path required for daemon mode'); + process.exit(1); + } + await startDaemon(SESSION_DIR, profilePath, argv.session); + return; + } + + // Parse command + const command = argv._[0]; + + if (!command) { + console.error('Error: No command specified\n'); + printUsage(); + process.exit(1); + } + + try { + switch (command) { + case 'help': { + printUsage(); + break; + } + + case 'load': { + const profilePath = argv._[1]; + if (!profilePath) { + console.error('Error: Profile path required for "load" command'); + console.error('Usage: pq load [--session ]'); + process.exit(1); + } + + console.log(`Loading profile from ${profilePath}...`); + const sessionId = await startNewDaemon( + SESSION_DIR, + profilePath, + argv.session + ); + console.log(`Session started: ${sessionId}`); + break; + } + + case 'profile': { + const subcommand = argv._[1] ?? 'info'; + if (subcommand === 'info' || subcommand === 'threads') { + const result = await sendCommand( + SESSION_DIR, + { command: 'profile', subcommand }, + argv.session + ); + console.log(formatOutput(result, argv.json || false)); + } else { + console.error(`Error: Unknown command ${command} ${subcommand}`); + process.exit(1); + } + break; + } + + case 'thread': { + const subcommand = argv._[1] ?? 
'info'; + // For thread select, get the thread handle from argv._[2] if not provided via --thread flag + const thread = + argv.thread ?? (subcommand === 'select' ? argv._[2] : undefined); + + // Parse marker filter options if this is a markers command + let markerFilters: MarkerFilterOptions | undefined; + let functionFilters: FunctionFilterOptions | undefined; + + if (subcommand === 'markers') { + const hasSearch = !!argv.search; + const hasMinDuration = !!argv['min-duration']; + const hasMaxDuration = !!argv['max-duration']; + const hasCategory = !!argv.category; + const hasStack = argv['has-stack']; + const hasLimit = !!argv.limit; + const hasGroupBy = !!argv['group-by']; + const hasAutoGroup = argv['auto-group']; + + if ( + hasSearch || + hasMinDuration || + hasMaxDuration || + hasCategory || + hasStack || + hasLimit || + hasGroupBy || + hasAutoGroup + ) { + markerFilters = {}; + if (hasSearch) { + markerFilters.searchString = argv.search; + } + if (hasMinDuration) { + const minDuration = parseFloat(argv['min-duration']); + if (isNaN(minDuration) || minDuration < 0) { + console.error( + 'Error: --min-duration must be a positive number (in milliseconds)' + ); + process.exit(1); + } + markerFilters.minDuration = minDuration; + } + if (hasMaxDuration) { + const maxDuration = parseFloat(argv['max-duration']); + if (isNaN(maxDuration) || maxDuration < 0) { + console.error( + 'Error: --max-duration must be a positive number (in milliseconds)' + ); + process.exit(1); + } + markerFilters.maxDuration = maxDuration; + } + if (hasCategory) { + markerFilters.category = argv.category; + } + if (hasStack) { + markerFilters.hasStack = true; + } + if (hasLimit) { + const limit = parseInt(argv.limit, 10); + if (isNaN(limit) || limit <= 0) { + console.error('Error: --limit must be a positive integer'); + process.exit(1); + } + markerFilters.limit = limit; + } + if (hasGroupBy) { + markerFilters.groupBy = argv['group-by']; + } + if (hasAutoGroup) { + markerFilters.autoGroup = true; + } + } + } + + // Parse function filter options if this is a functions command + if (subcommand === 'functions') { + const hasSearch = !!argv.search; + const hasMinSelf = !!argv['min-self']; + const hasLimit = !!argv.limit; + + if (hasSearch || hasMinSelf || hasLimit) { + functionFilters = {}; + if (hasSearch) { + functionFilters.searchString = argv.search; + } + if (hasMinSelf) { + const minSelf = parseFloat(argv['min-self']); + if (isNaN(minSelf) || minSelf < 0 || minSelf > 100) { + console.error( + 'Error: --min-self must be a number between 0 and 100 (percentage)' + ); + process.exit(1); + } + functionFilters.minSelf = minSelf; + } + if (hasLimit) { + const limit = parseInt(argv.limit, 10); + if (isNaN(limit) || limit <= 0) { + console.error('Error: --limit must be a positive integer'); + process.exit(1); + } + functionFilters.limit = limit; + } + } + } + + // Parse call tree options for samples-top-down and samples-bottom-up + let callTreeOptions: CallTreeCollectionOptions | undefined; + if ( + subcommand === 'samples-top-down' || + subcommand === 'samples-bottom-up' + ) { + const hasMaxLines = !!argv['max-lines']; + const hasScoring = !!argv.scoring; + + if (hasMaxLines || hasScoring) { + callTreeOptions = {}; + if (hasMaxLines) { + const maxLines = parseInt(String(argv['max-lines']), 10); + if (isNaN(maxLines) || maxLines <= 0) { + console.error('Error: --max-lines must be a positive integer'); + process.exit(1); + } + callTreeOptions.maxNodes = maxLines; + } + if (hasScoring) { + const validStrategies = [ + 
'exponential-0.95', + 'exponential-0.92', + 'exponential-0.9', + 'exponential-0.8', + 'harmonic-0.1', + 'harmonic-0.5', + 'harmonic-1.0', + 'percentage-only', + ]; + const scoringValue = argv.scoring; + if (!scoringValue || !validStrategies.includes(scoringValue)) { + console.error( + `Error: --scoring must be one of: ${validStrategies.join(', ')}` + ); + process.exit(1); + } + callTreeOptions.scoringStrategy = + scoringValue as CallTreeScoringStrategy; + } + } + } + + if ( + subcommand === 'info' || + subcommand === 'select' || + subcommand === 'samples' || + subcommand === 'samples-top-down' || + subcommand === 'samples-bottom-up' || + subcommand === 'markers' || + subcommand === 'functions' + ) { + const result = await sendCommand( + SESSION_DIR, + { + command: 'thread', + subcommand, + thread, + markerFilters, + functionFilters, + callTreeOptions, + }, + argv.session + ); + console.log(formatOutput(result, argv.json || false)); + } else { + console.error(`Error: Unknown command ${command} ${subcommand}`); + process.exit(1); + } + break; + } + + case 'marker': { + const subcommand = argv._[1] ?? 'info'; + // For marker commands, get the marker handle from argv._[2] if not provided via --marker flag + const marker = + argv.marker ?? + (subcommand === 'info' || + subcommand === 'select' || + subcommand === 'stack' + ? argv._[2] + : undefined); + if ( + subcommand === 'info' || + subcommand === 'select' || + subcommand === 'stack' + ) { + const result = await sendCommand( + SESSION_DIR, + { command: 'marker', subcommand, marker }, + argv.session + ); + console.log(formatOutput(result, argv.json || false)); + } else { + console.error(`Error: Unknown command ${command} ${subcommand}`); + process.exit(1); + } + break; + } + + case 'sample': { + const subcommand = argv._[1] ?? 'info'; + const sample = argv.sample; + if (subcommand === 'info' || subcommand === 'select') { + const result = await sendCommand( + SESSION_DIR, + { command: 'sample', subcommand, sample }, + argv.session + ); + console.log(formatOutput(result, argv.json || false)); + } else { + console.error(`Error: Unknown command ${command} ${subcommand}`); + process.exit(1); + } + break; + } + + case 'function': { + const subcommand = argv._[1] ?? 'info'; + // For function commands, get the function handle from argv._[2] if not provided via --function flag + const function_ = + argv.function ?? + (subcommand === 'info' || + subcommand === 'expand' || + subcommand === 'select' + ? 
argv._[2] + : undefined); + if ( + subcommand === 'info' || + subcommand === 'expand' || + subcommand === 'select' + ) { + const result = await sendCommand( + SESSION_DIR, + { command: 'function', subcommand, function: function_ }, + argv.session + ); + console.log(formatOutput(result, argv.json || false)); + } else { + console.error(`Error: Unknown command ${command} ${subcommand}`); + process.exit(1); + } + break; + } + + case 'zoom': { + const subcommand = argv._[1]; + if (!subcommand) { + console.error('Error: zoom command requires a subcommand'); + console.error('Usage: pq zoom [range]'); + process.exit(1); + } + if (subcommand === 'push') { + const range = argv._[2]; + if (!range) { + console.error('Error: zoom push requires a range argument'); + console.error('Usage: pq zoom push '); + console.error('Example: pq zoom push 2.7,3.1'); + process.exit(1); + } + const result = await sendCommand( + SESSION_DIR, + { command: 'zoom', subcommand: 'push', range }, + argv.session + ); + console.log(formatOutput(result, argv.json || false)); + } else if (subcommand === 'pop') { + const result = await sendCommand( + SESSION_DIR, + { command: 'zoom', subcommand: 'pop' }, + argv.session + ); + console.log(formatOutput(result, argv.json || false)); + } else if (subcommand === 'clear') { + const result = await sendCommand( + SESSION_DIR, + { command: 'zoom', subcommand: 'clear' }, + argv.session + ); + console.log(formatOutput(result, argv.json || false)); + } else { + console.error(`Error: Unknown command ${command} ${subcommand}`); + process.exit(1); + } + break; + } + + case 'status': { + const result = await sendCommand( + SESSION_DIR, + { command: 'status' }, + argv.session + ); + console.log(formatOutput(result, argv.json || false)); + break; + } + + case 'stop': { + if (argv.all) { + const sessionIds = listSessions(SESSION_DIR); + await Promise.all( + sessionIds.map((id) => stopDaemon(SESSION_DIR, id)) + ); + } else { + await stopDaemon(SESSION_DIR, argv.session); + } + break; + } + + case 'list-sessions': { + const sessionIds = listSessions(SESSION_DIR); + let numCleaned = 0; + const runningSessionMetadata = []; + for (const sessionId of sessionIds) { + const metadata = validateSession(SESSION_DIR, sessionId); + if (metadata === null) { + cleanupSession(SESSION_DIR, sessionId); + numCleaned++; + continue; + } + runningSessionMetadata.push(metadata); + } + + if (numCleaned !== 0) { + console.log(`Cleaned up ${numCleaned} stale sessions.`); + console.log(); + } + runningSessionMetadata.sort( + (a, b) => + new Date(a.createdAt).getTime() - new Date(b.createdAt).getTime() + ); + console.log(`Found ${runningSessionMetadata.length} running sessions:`); + for (const metadata of runningSessionMetadata) { + console.log( + `- ${metadata.id}, created at ${metadata.createdAt} [daemon pid: ${metadata.pid}]` + ); + } + + break; + } + + default: { + console.error(`Error: Unknown command "${command}"\n`); + printUsage(); + process.exit(1); + } + } + } catch (error) { + console.error(`Error: ${error instanceof Error ? error.message : error}`); + process.exit(1); + } +} + +main().catch((error) => { + console.error(`Fatal error: ${error}`); + process.exit(1); +}); diff --git a/src/profile-query-cli/package.json b/src/profile-query-cli/package.json new file mode 100644 index 0000000000..dba0aecd0e --- /dev/null +++ b/src/profile-query-cli/package.json @@ -0,0 +1,35 @@ +{ + "_comment": "This is a distribution package for a pre-bundled CLI artifact. Build dependencies are defined in the root package.json. 
Run 'yarn build-profile-query-cli' from the repository root to build dist/pq.js.", + "name": "@firefox-profiler/pq", + "version": "0.1.0", + "description": "Command-line interface for querying Firefox Profiler profiles with persistent daemon sessions", + "main": "./dist/pq.js", + "bin": { + "pq": "./dist/pq.js" + }, + "files": [ + "dist/pq.js" + ], + "engines": { + "node": ">= 22 < 23" + }, + "keywords": [ + "profiler", + "firefox", + "performance", + "profiling", + "cli", + "performance-analysis" + ], + "author": "Mozilla DevTools", + "license": "MPL-2.0", + "repository": { + "type": "git", + "url": "https://github.com/firefox-devtools/profiler", + "directory": "src/profile-query-cli" + }, + "homepage": "https://profiler.firefox.com", + "bugs": { + "url": "https://github.com/firefox-devtools/profiler/issues" + } +} diff --git a/src/profile-query-cli/protocol.ts b/src/profile-query-cli/protocol.ts new file mode 100644 index 0000000000..cdf6e21c0b --- /dev/null +++ b/src/profile-query-cli/protocol.ts @@ -0,0 +1,128 @@ +/** + * Protocol for communication between pq client and daemon. + * Messages are sent as line-delimited JSON over Unix domain sockets. + */ + +// Re-export shared types from profile-query +export type { + MarkerFilterOptions, + FunctionFilterOptions, + SessionContext, + WithContext, + StatusResult, + FunctionExpandResult, + FunctionInfoResult, + ViewRangeResult, + ThreadInfoResult, + ThreadSamplesResult, + ThreadSamplesTopDownResult, + ThreadSamplesBottomUpResult, + CallTreeNode, + CallTreeScoringStrategy, + ThreadMarkersResult, + ThreadFunctionsResult, + DurationStats, + RateStats, + MarkerGroupData, + MarkerInfoResult, + MarkerStackResult, + StackTraceData, + ProfileInfoResult, +} from '../profile-query/types'; +export type { CallTreeCollectionOptions } from '../profile-query/formatters/call-tree'; + +// Import types for use in type definitions +import type { + MarkerFilterOptions, + FunctionFilterOptions, + WithContext, + StatusResult, + FunctionExpandResult, + FunctionInfoResult, + ViewRangeResult, + ThreadInfoResult, + MarkerStackResult, + MarkerInfoResult, + ProfileInfoResult, + ThreadSamplesResult, + ThreadSamplesTopDownResult, + ThreadSamplesBottomUpResult, + ThreadMarkersResult, + ThreadFunctionsResult, +} from '../profile-query/types'; +import type { CallTreeCollectionOptions } from '../profile-query/formatters/call-tree'; + +export type ClientMessage = + | { type: 'command'; command: ClientCommand } + | { type: 'shutdown' } + | { type: 'status' }; + +export type ClientCommand = + | { command: 'profile'; subcommand: 'info' | 'threads' } + | { + command: 'thread'; + subcommand: + | 'info' + | 'select' + | 'samples' + | 'samples-top-down' + | 'samples-bottom-up' + | 'markers' + | 'functions'; + thread?: string; + markerFilters?: MarkerFilterOptions; + functionFilters?: FunctionFilterOptions; + callTreeOptions?: CallTreeCollectionOptions; + } + | { + command: 'marker'; + subcommand: 'info' | 'select' | 'stack'; + marker?: string; + } + | { command: 'sample'; subcommand: 'info' | 'select'; sample?: string } + | { + command: 'function'; + subcommand: 'info' | 'select' | 'expand'; + function?: string; + } + | { + command: 'zoom'; + subcommand: 'push' | 'pop' | 'clear'; + range?: string; + } + | { command: 'status' }; + +export type ServerResponse = + | { type: 'success'; result: string | CommandResult } + | { type: 'error'; error: string } + | { type: 'loading' } + | { type: 'ready' }; + +/** + * CommandResult is a union of all possible structured result types. 
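Concretely, each request/response exchange on the socket is one `ClientMessage` line followed by one `ServerResponse` line. A sketch of what `pq thread markers --search DOMEvent --min-duration 10` could put on the wire (a hypothetical client-side snippet; only the shapes come from the types in this file):

```ts
import type { ClientMessage, ServerResponse } from './protocol';

// Request: a single ClientMessage, serialized onto one newline-terminated line.
const request: ClientMessage = {
  type: 'command',
  command: {
    command: 'thread',
    subcommand: 'markers',
    markerFilters: { searchString: 'DOMEvent', minDuration: 10 },
  },
};
const requestLine = JSON.stringify(request) + '\n';

// Response: one newline-terminated ServerResponse per request; its `result`
// field is either a legacy string or one of the CommandResult variants below.
function parseResponseLine(line: string): ServerResponse {
  return JSON.parse(line) as ServerResponse;
}
```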
+ * Commands can return either a string (legacy) or a structured result. + */ +export type CommandResult = + | StatusResult + | WithContext + | WithContext + | ViewRangeResult + | WithContext + | WithContext + | WithContext + | WithContext + | WithContext + | WithContext + | WithContext + | WithContext + | WithContext; + +export interface SessionMetadata { + id: string; + socketPath: string; + logPath: string; + pid: number; + profilePath: string; + createdAt: string; + buildHash: string; +} diff --git a/src/profile-query-cli/session.ts b/src/profile-query-cli/session.ts new file mode 100644 index 0000000000..a0a14c950c --- /dev/null +++ b/src/profile-query-cli/session.ts @@ -0,0 +1,202 @@ +/** + * Session management for pq daemon. + * Handles session files, socket paths, and current session tracking. + * + * All functions take an explicit sessionDir parameter for testability + * and to avoid global state. The CLI entry point reads PQ_SESSION_DIR + * once and passes it through. + */ + +import * as fs from 'fs'; +import * as path from 'path'; +import type { SessionMetadata } from './protocol'; + +/** + * Ensure the session directory exists. + */ +export function ensureSessionDir(sessionDir: string): void { + if (!fs.existsSync(sessionDir)) { + fs.mkdirSync(sessionDir, { recursive: true }); + } +} + +/** + * Generate a new session ID. + */ +export function generateSessionId(): string { + return Math.random().toString(36).substring(2, 15); +} + +/** + * Get the socket path for a session. + */ +export function getSocketPath(sessionDir: string, sessionId: string): string { + return path.join(sessionDir, `${sessionId}.sock`); +} + +/** + * Get the log path for a session. + */ +export function getLogPath(sessionDir: string, sessionId: string): string { + return path.join(sessionDir, `${sessionId}.log`); +} + +/** + * Get the metadata file path for a session. + */ +export function getMetadataPath(sessionDir: string, sessionId: string): string { + return path.join(sessionDir, `${sessionId}.json`); +} + +/** + * Save session metadata to disk. + */ +export function saveSessionMetadata( + sessionDir: string, + metadata: SessionMetadata +): void { + ensureSessionDir(sessionDir); + const metadataPath = getMetadataPath(sessionDir, metadata.id); + fs.writeFileSync(metadataPath, JSON.stringify(metadata, null, 2)); +} + +/** + * Load session metadata from disk. + */ +export function loadSessionMetadata( + sessionDir: string, + sessionId: string +): SessionMetadata | null { + const metadataPath = getMetadataPath(sessionDir, sessionId); + if (!fs.existsSync(metadataPath)) { + return null; + } + try { + const data = fs.readFileSync(metadataPath, 'utf-8'); + return JSON.parse(data) as SessionMetadata; + } catch (_error) { + return null; + } +} + +/** + * Set the current session by writing to a text file. + */ +export function setCurrentSession(sessionDir: string, sessionId: string): void { + ensureSessionDir(sessionDir); + + const currentSessionFile = path.join(sessionDir, 'current.txt'); + fs.writeFileSync(currentSessionFile, sessionId, 'utf-8'); +} + +/** + * Get the current session ID by reading from a text file. 
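Taken together, the helpers above are what a daemon would use to announce itself. A sketch of that registration sequence, assuming it lives alongside `session.ts` (this illustrates the helpers' intended use; the actual daemon code elsewhere in this patch may differ):

```ts
import {
  generateSessionId,
  getSocketPath,
  getLogPath,
  saveSessionMetadata,
  setCurrentSession,
} from './session';
import type { SessionMetadata } from './protocol';

// Register a freshly started daemon so later `pq` invocations can find it.
function registerSession(
  sessionDir: string,
  profilePath: string
): SessionMetadata {
  const id = generateSessionId();
  const metadata: SessionMetadata = {
    id,
    socketPath: getSocketPath(sessionDir, id),
    logPath: getLogPath(sessionDir, id),
    pid: process.pid,
    profilePath,
    createdAt: new Date().toISOString(),
    buildHash: 'dev', // normally the hash baked in at bundle time
  };
  saveSessionMetadata(sessionDir, metadata);
  setCurrentSession(sessionDir, id); // make this the default session for `pq`
  return metadata;
}
```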
+ */ +export function getCurrentSessionId(sessionDir: string): string | null { + const currentSessionFile = path.join(sessionDir, 'current.txt'); + + if (!fs.existsSync(currentSessionFile)) { + return null; + } + + try { + return fs.readFileSync(currentSessionFile, 'utf-8').trim(); + } catch (_error) { + console.error(`Failed to read current session: ${_error}`); + return null; + } +} + +/** + * Get the socket path for the current session. + */ +export function getCurrentSocketPath(sessionDir: string): string | null { + const sessionId = getCurrentSessionId(sessionDir); + + if (!sessionId) { + return null; + } + + return getSocketPath(sessionDir, sessionId); +} + +/** + * Check if a process is running. + */ +export function isProcessRunning(pid: number): boolean { + try { + // Sending signal 0 checks if process exists without killing it + process.kill(pid, 0); + return true; + } catch (_error) { + return false; + } +} + +/** + * Clean up a session's files. + */ +export function cleanupSession(sessionDir: string, sessionId: string): void { + const socketPath = getSocketPath(sessionDir, sessionId); + const metadataPath = getMetadataPath(sessionDir, sessionId); + const currentSessionFile = path.join(sessionDir, 'current.txt'); + // Note: We intentionally don't delete the log file for debugging purposes + // const logPath = getLogPath(sessionDir, sessionId); + + // Remove socket file + if (fs.existsSync(socketPath)) { + fs.unlinkSync(socketPath); + } + + // Remove metadata file + if (fs.existsSync(metadataPath)) { + fs.unlinkSync(metadataPath); + } + + // Remove current session file if it points to this session + const currentSessionId = getCurrentSessionId(sessionDir); + if (currentSessionId === sessionId && fs.existsSync(currentSessionFile)) { + fs.unlinkSync(currentSessionFile); + } +} + +/** + * Validate that a session is healthy (process running, socket exists). + * If not, clean up stale files. + */ +export function validateSession( + sessionDir: string, + sessionId: string +): SessionMetadata | null { + const metadata = loadSessionMetadata(sessionDir, sessionId); + if (!metadata) { + return null; + } + + // Check if process is still running + if (!isProcessRunning(metadata.pid)) { + // console.error( + // `Session ${sessionId} daemon process with PID ${metadata.pid} not found. Cleaning up.` + // ); + return null; + } + + // Check if socket exists + if (!fs.existsSync(metadata.socketPath)) { + // console.error(`Session ${sessionId} socket not found. Cleaning up.`); + return null; + } + + return metadata; +} + +/** + * List all session IDs. + */ +export function listSessions(sessionDir: string): string[] { + ensureSessionDir(sessionDir); + const files = fs.readdirSync(sessionDir); + return files + .filter((f) => f.endsWith('.json')) + .map((f) => path.basename(f, '.json')); +} diff --git a/src/profile-query-cli/tests/basic.test.ts b/src/profile-query-cli/tests/basic.test.ts new file mode 100644 index 0000000000..262308bee7 --- /dev/null +++ b/src/profile-query-cli/tests/basic.test.ts @@ -0,0 +1,95 @@ +/** + * Basic CLI functionality tests. + * Migrated from bin/pq-test bash script. 
+ */ + +import { readdir } from 'fs/promises'; +import { + createTestContext, + cleanupTestContext, + pq, + pqFail, + type PqTestContext, +} from './utils'; + +describe('pq basic functionality', () => { + let ctx: PqTestContext; + + beforeEach(async () => { + ctx = await createTestContext(); + }); + + afterEach(async () => { + await cleanupTestContext(ctx); + }); + + test('load creates a session', async () => { + const result = await pq(ctx, [ + 'load', + 'src/test/fixtures/upgrades/processed-1.json', + ]); + + expect(result.exitCode).toBe(0); + expect(result.stdout).toContain('Loading profile from'); + expect(result.stdout).toContain('Session started:'); + + // Extract session ID + expect(typeof result.stdout).toBe('string'); + const match = (result.stdout as string).match(/Session started: (\w+)/); + expect(match).toBeTruthy(); + const sessionId = match![1]; + + // Verify session files exist + const files = await readdir(ctx.sessionDir); + expect(files).toContain(`${sessionId}.sock`); + expect(files).toContain(`${sessionId}.json`); + expect(files).toContain('current.txt'); + }); + + test('profile info works after load', async () => { + await pq(ctx, ['load', 'src/test/fixtures/upgrades/processed-1.json']); + + const result = await pq(ctx, ['profile', 'info']); + + expect(result.exitCode).toBe(0); + expect(result.stdout).toContain('This profile contains'); + }); + + test('stop cleans up session', async () => { + await pq(ctx, ['load', 'src/test/fixtures/upgrades/processed-1.json']); + await pq(ctx, ['stop']); + + // Verify socket is removed (the main cleanup requirement) + const files = await readdir(ctx.sessionDir); + expect(files.filter((f) => f.endsWith('.sock'))).toHaveLength(0); + }); + + test('load fails for missing file', async () => { + const result = await pqFail(ctx, ['load', '/nonexistent/file.json']); + + expect(result.exitCode).not.toBe(0); + const output = String(result.stdout || '') + String(result.stderr || ''); + expect(output).toContain('not found'); + }); + + test('profile info fails without active session', async () => { + const result = await pqFail(ctx, ['profile', 'info']); + + expect(result.exitCode).not.toBe(0); + const output = String(result.stdout || '') + String(result.stderr || ''); + expect(output).toContain('No active session'); + }); + + test('multiple profile info calls work (daemon stays running)', async () => { + await pq(ctx, ['load', 'src/test/fixtures/upgrades/processed-1.json']); + + // First call + const result1 = await pq(ctx, ['profile', 'info']); + expect(result1.exitCode).toBe(0); + + // Second call - should still work (daemon running) + const result2 = await pq(ctx, ['profile', 'info']); + expect(result2.exitCode).toBe(0); + expect(result2.stdout).toEqual(result1.stdout); + }); +}); diff --git a/src/profile-query-cli/tests/daemon-startup.test.ts b/src/profile-query-cli/tests/daemon-startup.test.ts new file mode 100644 index 0000000000..8203874dd4 --- /dev/null +++ b/src/profile-query-cli/tests/daemon-startup.test.ts @@ -0,0 +1,116 @@ +/** + * Tests for two-phase daemon startup behavior. + * Verifies socket creation before profile loading and proper status reporting. 
+ */ + +import { readFile, access } from 'fs/promises'; +import { join } from 'path'; +import { + createTestContext, + cleanupTestContext, + pq, + pqFail, + type PqTestContext, +} from './utils'; + +describe('daemon startup (two-phase)', () => { + let ctx: PqTestContext; + + beforeEach(async () => { + ctx = await createTestContext(); + }); + + afterEach(async () => { + await cleanupTestContext(ctx); + }); + + test('daemon creates socket and metadata before loading profile', async () => { + const startTime = Date.now(); + + const result = await pq(ctx, [ + 'load', + 'src/test/fixtures/upgrades/processed-1.json', + ]); + + const endTime = Date.now(); + const duration = endTime - startTime; + + expect(result.exitCode).toBe(0); + + // Should complete quickly (< 1 second for local file) + // The key improvement is that we don't wait for profile parsing + // before getting success feedback + expect(duration).toBeLessThan(2000); + + // Extract session ID + expect(typeof result.stdout).toBe('string'); + const match = (result.stdout as string).match(/Session started: (\w+)/); + const sessionId = match![1]; + + // Verify metadata file exists and contains correct info + const metadataPath = join(ctx.sessionDir, `${sessionId}.json`); + const metadata = JSON.parse(await readFile(metadataPath, 'utf-8')); + + expect(metadata.id).toBe(sessionId); + expect(metadata.socketPath).toContain(sessionId); + expect(metadata.pid).toBeNumber(); + expect(metadata.profilePath).toContain('processed-1.json'); + }); + + test('load returns non-zero exit code on profile load failure', async () => { + // Create an invalid JSON file + const invalidProfile = join(ctx.sessionDir, 'invalid.json'); + const { writeFile } = await import('fs/promises'); + await writeFile(invalidProfile, '{ invalid json content', 'utf-8'); + + const result = await pqFail(ctx, ['load', invalidProfile]); + + expect(result.exitCode).not.toBe(0); + const output = String(result.stdout || '') + String(result.stderr || ''); + expect(output).toMatch(/Profile load failed|Failed to|parse|invalid/i); + }); + + test('daemon startup fails fast with short timeout', async () => { + // This test verifies Phase 1 timeout behavior + // We can't easily force a daemon startup failure, but we can + // verify the timeout is reasonable by checking it doesn't wait forever + + const result = await pqFail(ctx, ['load', '/nonexistent/file.json']); + + // Should fail quickly (Phase 1: 500ms for daemon, Phase 2: fails on validation) + expect(result.exitCode).not.toBe(0); + }); + + test('load blocks until profile is fully loaded', async () => { + // Start loading + await pq(ctx, ['load', 'src/test/fixtures/upgrades/processed-1.json']); + + // If load returned, profile should be ready immediately + const result = await pq(ctx, ['profile', 'info']); + expect(result.exitCode).toBe(0); + expect(result.stdout).toContain('This profile contains'); + }); + + test('validates session before returning (checks process + socket)', async () => { + const result = await pq(ctx, [ + 'load', + 'src/test/fixtures/upgrades/processed-1.json', + ]); + + expect(typeof result.stdout).toBe('string'); + const match = (result.stdout as string).match(/Session started: (\w+)/); + const sessionId = match![1]; + + // Verify both socket and metadata exist (validateSession checks both) + const socketPath = join(ctx.sessionDir, `${sessionId}.sock`); + const metadataPath = join(ctx.sessionDir, `${sessionId}.json`); + + await expect(access(socketPath)).resolves.toBeUndefined(); + await 
expect(access(metadataPath)).resolves.toBeUndefined(); + + // Process should be running (metadata contains PID) + const metadata = JSON.parse(await readFile(metadataPath, 'utf-8')); + expect(metadata.pid).toBeNumber(); + expect(metadata.pid).toBeGreaterThan(0); + }); +}); diff --git a/src/profile-query-cli/tests/sessions.test.ts b/src/profile-query-cli/tests/sessions.test.ts new file mode 100644 index 0000000000..c8aacb3bcd --- /dev/null +++ b/src/profile-query-cli/tests/sessions.test.ts @@ -0,0 +1,117 @@ +/** + * Multi-session tests. + * Migrated from bin/pq-test-multi bash script. + */ + +import { + createTestContext, + cleanupTestContext, + pq, + type PqTestContext, +} from './utils'; + +describe('pq multiple concurrent sessions', () => { + let ctx: PqTestContext; + + beforeEach(async () => { + ctx = await createTestContext(); + }); + + afterEach(async () => { + await cleanupTestContext(ctx); + }); + + test('can run multiple sessions with explicit IDs', async () => { + const session1 = 'test-session-1'; + const session2 = 'test-session-2'; + const session3 = 'test-session-3'; + + // Start three sessions + await pq(ctx, [ + 'load', + 'src/test/fixtures/upgrades/processed-1.json', + '--session', + session1, + ]); + await pq(ctx, [ + 'load', + 'src/test/fixtures/upgrades/processed-2.json', + '--session', + session2, + ]); + await pq(ctx, [ + 'load', + 'src/test/fixtures/upgrades/processed-3.json', + '--session', + session3, + ]); + + // Query each session explicitly + const result1 = await pq(ctx, ['profile', 'info', '--session', session1]); + expect(result1.stdout).toContain('This profile contains'); + + const result2 = await pq(ctx, ['profile', 'info', '--session', session2]); + expect(result2.stdout).toContain('This profile contains'); + + // Query current session (should be session3) + const result3 = await pq(ctx, ['profile', 'info']); + expect(result3.stdout).toContain('This profile contains'); + + // Note: We don't assert that results differ, as different test profiles + // might coincidentally have identical summaries. 
+ + // Stop all sessions + await pq(ctx, ['stop', '--session', session1]); + await pq(ctx, ['stop', '--session', session2]); + await pq(ctx, ['stop', '--session', session3]); + }); + + test('list-sessions shows running sessions', async () => { + // Start two sessions + await pq(ctx, [ + 'load', + 'src/test/fixtures/upgrades/processed-1.json', + '--session', + 'session-a', + ]); + await pq(ctx, [ + 'load', + 'src/test/fixtures/upgrades/processed-2.json', + '--session', + 'session-b', + ]); + + // List sessions + const result = await pq(ctx, ['list-sessions']); + + expect(result.stdout).toContain('Found 2 running sessions'); + expect(result.stdout).toContain('session-a'); + expect(result.stdout).toContain('session-b'); + + // Clean up + await pq(ctx, ['stop', '--all']); + }); + + test('stop --all stops all sessions', async () => { + // Start multiple sessions + await pq(ctx, [ + 'load', + 'src/test/fixtures/upgrades/processed-1.json', + '--session', + 'session-1', + ]); + await pq(ctx, [ + 'load', + 'src/test/fixtures/upgrades/processed-2.json', + '--session', + 'session-2', + ]); + + // Stop all + await pq(ctx, ['stop', '--all']); + + // Verify no sessions + const result = await pq(ctx, ['list-sessions']); + expect(result.stdout).toContain('Found 0 running sessions'); + }); +}); diff --git a/src/profile-query-cli/tests/setup.ts b/src/profile-query-cli/tests/setup.ts new file mode 100644 index 0000000000..a47b214d7c --- /dev/null +++ b/src/profile-query-cli/tests/setup.ts @@ -0,0 +1,7 @@ +/** + * Jest setup for CLI integration tests. + * These tests only need jest-extended, not the full browser test setup. + */ + +// Importing this makes jest-extended matchers available everywhere +import 'jest-extended/all'; diff --git a/src/profile-query-cli/tests/utils.ts b/src/profile-query-cli/tests/utils.ts new file mode 100644 index 0000000000..6f635c8a15 --- /dev/null +++ b/src/profile-query-cli/tests/utils.ts @@ -0,0 +1,161 @@ +/** + * Utilities for CLI integration tests. + */ + +import { spawn } from 'child_process'; +import { mkdtemp, rm } from 'fs/promises'; +import { tmpdir } from 'os'; +import { join } from 'path'; + +const PQ_BIN = './src/profile-query-cli/dist/pq.js'; + +/** + * Simple command execution result. + */ +export interface CommandResult { + stdout: string; + stderr: string; + exitCode: number; +} + +/** + * Execute a command and return stdout, stderr, and exit code. + * Simple replacement for execa that works with Jest without ESM complications. + */ +function exec( + command: string, + args: string[], + options: { + env?: Record; + timeout?: number; + } = {} +): Promise { + return new Promise((resolve, reject) => { + const proc = spawn(command, args, { + env: { ...process.env, ...options.env }, + }); + + let stdout = ''; + let stderr = ''; + let timedOut = false; + let timeoutId: NodeJS.Timeout | undefined; + + if (options.timeout) { + timeoutId = setTimeout(() => { + timedOut = true; + proc.kill('SIGTERM'); + setTimeout(() => proc.kill('SIGKILL'), 1000); + }, options.timeout); + } + + proc.stdout?.on('data', (data) => { + stdout += data.toString(); + }); + + proc.stderr?.on('data', (data) => { + stderr += data.toString(); + }); + + proc.on('close', (code) => { + if (timeoutId) clearTimeout(timeoutId); + + if (timedOut) { + reject(new Error(`Command timed out after ${options.timeout}ms`)); + } else { + resolve({ + stdout, + stderr, + exitCode: code ?? 
1, + }); + } + }); + + proc.on('error', (err) => { + if (timeoutId) clearTimeout(timeoutId); + reject(err); + }); + }); +} + +/** + * Context for a pq test session. + */ +export interface PqTestContext { + sessionDir: string; + env: Record; +} + +/** + * Create a test context with isolated session directory. + * Each test should call this in beforeEach() for maximum isolation. + */ +export async function createTestContext(): Promise { + const sessionDir = await mkdtemp(join(tmpdir(), 'pq-test-')); + return { + sessionDir, + env: { PQ_SESSION_DIR: sessionDir }, + }; +} + +/** + * Clean up test context. + * Each test should call this in afterEach() to remove temp directory. + */ +export async function cleanupTestContext(ctx: PqTestContext): Promise { + await rm(ctx.sessionDir, { recursive: true, force: true }); +} + +/** + * Run a pq command. + */ +export async function runPq( + ctx: PqTestContext, + args: string[], + options?: { + reject?: boolean; + timeout?: number; + } +): Promise { + const result = await exec(PQ_BIN, args, { + env: ctx.env, + timeout: options?.timeout ?? 30000, + }); + + // Throw if reject is true (default) and command failed + if ((options?.reject ?? true) && result.exitCode !== 0) { + const error = new Error(`Command failed with exit code ${result.exitCode}`); + Object.assign(error, result); + throw error; + } + + return result; +} + +/** + * Run a pq command and expect it to succeed. + */ +export async function pq( + ctx: PqTestContext, + args: string[] +): Promise { + return runPq(ctx, args); +} + +/** + * Run a pq command and expect it to fail. + */ +export async function pqFail( + ctx: PqTestContext, + args: string[] +): Promise { + try { + await runPq(ctx, args); + throw new Error('Expected command to fail but it succeeded'); + } catch (error) { + if (error instanceof Error && error.message.includes('Expected command')) { + throw error; + } + // Return the error as a result (which has stdout/stderr/exitCode attached) + return error as CommandResult; + } +} diff --git a/src/profile-query-cli/webpack.config.js b/src/profile-query-cli/webpack.config.js new file mode 100644 index 0000000000..0c76b36be8 --- /dev/null +++ b/src/profile-query-cli/webpack.config.js @@ -0,0 +1,59 @@ +const path = require('path'); +const webpack = require('webpack'); +const projectRoot = path.join(__dirname, '../..'); +const includes = [path.join(projectRoot, 'src')]; + +// Generate a unique build hash based on timestamp +const BUILD_HASH = Date.now().toString(36); + +module.exports = { + name: 'profile-query-cli', + target: 'node', + mode: process.env.NODE_ENV, + stats: 'errors-only', + resolve: { + extensions: ['.js', '.jsx', '.ts', '.tsx'], + }, + output: { + path: path.resolve(__dirname, 'dist'), + filename: 'pq.js', + chunkLoading: false, + asyncChunks: false, + }, + entry: './src/profile-query-cli/index.ts', + module: { + rules: [ + { + test: /\.(js|ts|tsx)$/, + use: ['babel-loader'], + include: includes, + }, + ], + }, + plugins: [ + new webpack.BannerPlugin({ + banner: + '#!/usr/bin/env node\n\n// Polyfill browser globals for Node.js\nglobalThis.self = globalThis;', + raw: true, + }), + new webpack.DefinePlugin({ + __BUILD_HASH__: JSON.stringify(BUILD_HASH), + }), + // Ignore WASM demangle module for CLI build + new webpack.IgnorePlugin({ + resourceRegExp: /^gecko-profiler-demangle$/, + }), + // Replace SVG imports with empty string since CLI doesn't need icons + new webpack.NormalModuleReplacementPlugin(/\.svg$/, function (resource) { + resource.request = 
'data:text/javascript,export default ""'; + }), + ], + experiments: { + // Make WebAssembly work just like in webpack v4 + syncWebAssembly: true, + }, + optimization: { + // Minify for npm distribution (reduces from 2.5MB to 640KB) + minimize: true, + }, +}; diff --git a/src/profile-query/README.md b/src/profile-query/README.md new file mode 100644 index 0000000000..3386e61bf5 --- /dev/null +++ b/src/profile-query/README.md @@ -0,0 +1,58 @@ +# Profile Query Library + +A library for programmatically querying the contents of a Firefox Profiler profile. + +## Usage + +**Note:** Most users should use the [profile-query-cli](../profile-query-cli/README.md) (`pq` command) instead of using this library directly. + +### Building + +```bash +yarn build-profile-query +``` + +### Programmatic Usage + +```javascript +// Node.js interactive session +const { ProfileQuerier } = (await import('./dist/profile-query.js')).default; + +// Load from file +const p1 = await ProfileQuerier.load('/path/to/profile.json.gz'); + +// Load from profiler.firefox.com URL +const p2 = await ProfileQuerier.load( + 'https://profiler.firefox.com/from-url/http%3A%2F%2Fexample.com%2Fprofile.json/' +); + +// Load from share URL +const p3 = await ProfileQuerier.load('https://share.firefox.dev/4oLEjCw'); + +// Query the profile +const profileInfo = await p1.profileInfo(); +const threadInfo = await p1.threadInfo(); +const samples = await p1.threadSamples(); +``` + +## Available Methods + +- `static async load(filePathOrUrl: string): Promise` - Load a profile from file or URL +- `async profileInfo(): Promise` - Get profile summary (processes, threads, CPU activity) +- `async threadInfo(threadHandle?: string): Promise` - Get detailed thread information +- `async threadSelect(threadHandle: string): Promise` - Select a thread for subsequent queries +- `async threadSamples(threadHandle?: string): Promise` - Get call tree and top functions for a thread +- `async pushViewRange(rangeName: string): Promise` - Push a zoom range (supports timestamps, marker handles, or time values) +- `async popViewRange(): Promise` - Pop the most recent zoom range +- `async getStatus(): Promise` - Get current session status (selected thread, zoom ranges) + +## Architecture + +The library is built on top of the Firefox Profiler's Redux store and selectors: + +- **ProfileQuerier**: Main class that wraps a Redux store and provides query methods +- **TimestampManager**: Manages timestamp naming for time range queries +- **ThreadMap**: Maps thread handles (e.g., "t-0", "t-1") to thread indices +- **Formatters**: Format query results as human-readable text + +All query results are returned as formatted strings, suitable for display in a terminal or log file. 
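As a further illustration of the methods listed above, a caller can narrow the analysis to a time range before sampling a thread (the handle and range values here are only examples):

```ts
// Assumes `p1` was loaded as shown in the usage section above.
await p1.threadSelect('t-0'); // select the first thread
await p1.pushViewRange('2.7,3.1'); // zoom into the 2.7s-3.1s range
console.log(await p1.threadSamples()); // call tree for the zoomed range
console.log(await p1.getStatus()); // selected thread and current zoom stack
await p1.popViewRange(); // back to the full profile
```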
diff --git a/src/profile-query/cpu-activity.ts b/src/profile-query/cpu-activity.ts new file mode 100644 index 0000000000..dab814d5bc --- /dev/null +++ b/src/profile-query/cpu-activity.ts @@ -0,0 +1,208 @@ +import type { Slice, SliceTree } from 'firefox-profiler/utils/slice-tree'; +import type { TimestampManager } from './timestamps'; + +export interface CpuActivityEntry { + startTime: number; + startTimeName: string; + startTimeStr: string; // Formatted timestamp string (e.g., "6.991s") + endTime: number; + endTimeName: string; + endTimeStr: string; // Formatted timestamp string (e.g., "10.558s") + cpuMs: number; + depthLevel: number; +} + +function sliceToString( + slice: Slice, + time: number[], + tsManager: TimestampManager +): string { + const { avg, start, end } = slice; + const startTime = time[start]; + const endTime = time[end]; + const duration = endTime - startTime; + const startName = tsManager.nameForTimestamp(startTime); + const endName = tsManager.nameForTimestamp(endTime); + const startTimeStr = tsManager.timestampString(startTime); + const endTimeStr = tsManager.timestampString(endTime); + return `${Math.round(avg * 100)}% for ${duration.toFixed(1)}ms: [${startName} → ${endName}] (${startTimeStr} - ${endTimeStr})`; +} + +function appendSliceSubtree( + slices: Slice[], + startIndex: number, + parent: number | null, + childrenStartPerParent: number[], + interestingSliceIndexes: Set, + nestingDepth: number, + time: number[], + s: string[], + tsManager: TimestampManager +) { + for (let i = startIndex; i < slices.length; i++) { + if (!interestingSliceIndexes.has(i)) { + continue; + } + + const slice = slices[i]; + if (slice.parent !== parent) { + break; + } + + s.push( + ' '.repeat(nestingDepth) + '- ' + sliceToString(slice, time, tsManager) + ); + + const childrenStart = childrenStartPerParent[i]; + if (childrenStart !== null) { + appendSliceSubtree( + slices, + childrenStart, + i, + childrenStartPerParent, + interestingSliceIndexes, + nestingDepth + 1, + time, + s, + tsManager + ); + } + } +} + +export function printSliceTree( + { slices, time }: SliceTree, + tsManager: TimestampManager +): string[] { + if (slices.length === 0) { + return ['No significant activity.']; + } + + const childrenStartPerParent = new Array(slices.length); + const indexAndSumPerSlice = new Array(slices.length); + for (let i = 0; i < slices.length; i++) { + childrenStartPerParent[i] = null; + const { parent, sum } = slices[i]; + indexAndSumPerSlice.push({ i, sum }); + if (parent !== null && childrenStartPerParent[parent] === null) { + childrenStartPerParent[parent] = i; + } + } + indexAndSumPerSlice.sort((a, b) => b.sum - a.sum); + const interestingSliceIndexes = new Set( + indexAndSumPerSlice.slice(0, 20).map((x) => x.i) + ); + // console.log(interestingSliceIndexes); + + const s = new Array(); + appendSliceSubtree( + slices, + 0, + null, + childrenStartPerParent, + interestingSliceIndexes, + 0, + time, + s, + tsManager + ); + + return s; +} + +/** + * Collect CPU activity slices as structured data. 
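`collectSliceTree` below returns the same information as `printSliceTree`, just as data. As a sketch of how a caller might turn those entries back into an indented report (the helper is hypothetical; the field names come from `CpuActivityEntry` above):

```ts
import type { CpuActivityEntry } from './cpu-activity';

// One line per entry, indented by its depth in the slice tree, roughly
// mirroring the text produced by printSliceTree/sliceToString.
function renderCpuActivity(entries: CpuActivityEntry[]): string {
  return entries
    .map(
      (e) =>
        `${'  '.repeat(e.depthLevel)}- ${e.cpuMs.toFixed(1)}ms CPU ` +
        `[${e.startTimeName} → ${e.endTimeName}] (${e.startTimeStr} - ${e.endTimeStr})`
    )
    .join('\n');
}
```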
+ */ +export function collectSliceTree( + { slices, time }: SliceTree, + tsManager: TimestampManager +): CpuActivityEntry[] { + if (slices.length === 0) { + return []; + } + + const childrenStartPerParent = new Array(slices.length); + const indexAndSumPerSlice = new Array(slices.length); + for (let i = 0; i < slices.length; i++) { + childrenStartPerParent[i] = null; + const { parent, sum } = slices[i]; + indexAndSumPerSlice.push({ i, sum }); + if (parent !== null && childrenStartPerParent[parent] === null) { + childrenStartPerParent[parent] = i; + } + } + indexAndSumPerSlice.sort((a, b) => b.sum - a.sum); + const interestingSliceIndexes = new Set( + indexAndSumPerSlice.slice(0, 20).map((x) => x.i) + ); + + const result: CpuActivityEntry[] = []; + collectSliceSubtree( + slices, + 0, + null, + childrenStartPerParent, + interestingSliceIndexes, + 0, + time, + result, + tsManager + ); + + return result; +} + +function collectSliceSubtree( + slices: Slice[], + startIndex: number, + parent: number | null, + childrenStartPerParent: number[], + interestingSliceIndexes: Set, + nestingDepth: number, + time: number[], + result: CpuActivityEntry[], + tsManager: TimestampManager +) { + for (let i = startIndex; i < slices.length; i++) { + if (!interestingSliceIndexes.has(i)) { + continue; + } + + const slice = slices[i]; + if (slice.parent !== parent) { + break; + } + + const { start, end, avg } = slice; + const startTime = time[start]; + const endTime = time[end]; + const duration = endTime - startTime; + const cpuMs = duration * avg; + + result.push({ + startTime, + startTimeName: tsManager.nameForTimestamp(startTime), + startTimeStr: tsManager.timestampString(startTime), + endTime, + endTimeName: tsManager.nameForTimestamp(endTime), + endTimeStr: tsManager.timestampString(endTime), + cpuMs, + depthLevel: nestingDepth, + }); + + const childrenStart = childrenStartPerParent[i]; + if (childrenStart !== null) { + collectSliceSubtree( + slices, + childrenStart, + i, + childrenStartPerParent, + interestingSliceIndexes, + nestingDepth + 1, + time, + result, + tsManager + ); + } + } +} diff --git a/src/profile-query/formatters/call-tree.ts b/src/profile-query/formatters/call-tree.ts new file mode 100644 index 0000000000..06954b8a27 --- /dev/null +++ b/src/profile-query/formatters/call-tree.ts @@ -0,0 +1,340 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import type { CallTree } from 'firefox-profiler/profile-logic/call-tree'; +import type { + IndexIntoCallNodeTable, + ThreadIndex, + Lib, +} from 'firefox-profiler/types'; +import type { CallTreeNode, CallTreeScoringStrategy } from '../types'; +import type { FunctionMap } from '../function-map'; +import { formatFunctionNameWithLibrary } from '../function-list'; + +/** + * Compute inclusion score for a call tree node. + * The score determines priority for node budget selection. + * Property: score(child) ≤ score(parent) for any parent-child pair. 
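A small worked comparison makes the strategies concrete (numbers are illustrative): a node with 30% total time that sits 10 levels deep, versus a 10% node at the root. Aggressive depth discounting drops the deep node first, gentle discounting keeps it:

```ts
// Assuming computeInclusionScore as defined below.
computeInclusionScore(30, 10, 'exponential-0.95'); // ~17.96: deep node still outranks the root node
computeInclusionScore(30, 10, 'exponential-0.8');  // ~3.22: deep node is now deprioritized
computeInclusionScore(30, 10, 'percentage-only');  // 30: depth is ignored entirely
computeInclusionScore(10, 0, 'exponential-0.8');   // 10: a root node keeps its raw percentage
```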
+ */ +function computeInclusionScore( + totalPercentage: number, + depth: number, + strategy: CallTreeScoringStrategy +): number { + switch (strategy) { + case 'exponential-0.95': + return totalPercentage * Math.pow(0.95, depth); + case 'exponential-0.9': + return totalPercentage * Math.pow(0.9, depth); + case 'exponential-0.8': + return totalPercentage * Math.pow(0.8, depth); + case 'harmonic-0.1': + return totalPercentage / (1 + 0.1 * depth); + case 'harmonic-0.5': + return totalPercentage / (1 + 0.5 * depth); + case 'harmonic-1.0': + return totalPercentage / (1 + depth); + case 'percentage-only': + return totalPercentage; + default: + // Default to exponential-0.9 + return totalPercentage * Math.pow(0.94, depth); + } +} + +/** + * Simple max-heap implementation for priority queue. + */ +class MaxHeap { + private items: Array<{ item: T; priority: number }> = []; + + push(item: T, priority: number): void { + this.items.push({ item, priority }); + this._bubbleUp(this.items.length - 1); + } + + popMax(): T | null { + if (this.items.length === 0) { + return null; + } + if (this.items.length === 1) { + return this.items.pop()!.item; + } + + const max = this.items[0].item; + this.items[0] = this.items.pop()!; + this._bubbleDown(0); + return max; + } + + isEmpty(): boolean { + return this.items.length === 0; + } + + size(): number { + return this.items.length; + } + + private _bubbleUp(index: number): void { + while (index > 0) { + const parentIndex = Math.floor((index - 1) / 2); + if (this.items[index].priority <= this.items[parentIndex].priority) { + break; + } + // Swap + [this.items[index], this.items[parentIndex]] = [ + this.items[parentIndex], + this.items[index], + ]; + index = parentIndex; + } + } + + private _bubbleDown(index: number): void { + while (true) { + const leftChild = 2 * index + 1; + const rightChild = 2 * index + 2; + let largest = index; + + if ( + leftChild < this.items.length && + this.items[leftChild].priority > this.items[largest].priority + ) { + largest = leftChild; + } + + if ( + rightChild < this.items.length && + this.items[rightChild].priority > this.items[largest].priority + ) { + largest = rightChild; + } + + if (largest === index) { + break; + } + + // Swap + [this.items[index], this.items[largest]] = [ + this.items[largest], + this.items[index], + ]; + index = largest; + } + } +} + +/** + * Internal node used during collection. + */ +type CollectionNode = { + callNodeIndex: IndexIntoCallNodeTable; + depth: number; +}; + +/** + * Options for call tree collection. + */ +export type CallTreeCollectionOptions = { + /** Maximum number of nodes to include. Default: 100 */ + maxNodes?: number; + /** Scoring strategy for node selection. Default: 'exponential-0.9' */ + scoringStrategy?: CallTreeScoringStrategy; + /** Maximum depth to traverse (safety limit). Default: 200 */ + maxDepth?: number; + /** Maximum children to expand per node. Default: 100 */ + maxChildrenPerNode?: number; +}; + +/** + * Collect call tree data using heap-based expansion. + * This works for both top-down and bottom-up (inverted) trees. + */ +export function collectCallTree( + tree: CallTree, + functionMap: FunctionMap, + threadIndexes: Set, + libs: Lib[], + options: CallTreeCollectionOptions = {} +): CallTreeNode { + const maxNodes = options.maxNodes ?? 100; + const scoringStrategy = options.scoringStrategy ?? 'exponential-0.9'; + const maxDepth = options.maxDepth ?? 200; + const maxChildrenPerNode = options.maxChildrenPerNode ?? 
100;
+
+  // Map from call node index to our collection node
+  const includedNodes = new Set<IndexIntoCallNodeTable>();
+  const expansionFrontier = new MaxHeap<CollectionNode>();
+
+  // Start with root nodes
+  // For inverted (bottom-up) trees, there can be many roots (all leaf functions).
+  // Reserve some budget for expanding children by limiting initial roots to ~70% of budget.
+  const roots = tree.getRoots();
+  const maxInitialRoots = Math.min(roots.length, Math.ceil(maxNodes * 0.7));
+  for (let i = 0; i < maxInitialRoots; i++) {
+    const rootIndex = roots[i];
+    const nodeData = tree.getNodeData(rootIndex);
+    const totalPercentage = nodeData.totalRelative * 100;
+    const score = computeInclusionScore(totalPercentage, 0, scoringStrategy);
+
+    const collectionNode: CollectionNode = {
+      callNodeIndex: rootIndex,
+      depth: 0,
+    };
+
+    expansionFrontier.push(collectionNode, score);
+  }
+
+  // Expand nodes in score order until budget reached
+  while (includedNodes.size < maxNodes) {
+    const node = expansionFrontier.popMax();
+    if (!node) {
+      break;
+    }
+
+    // node is the next highest candidate; none of the other nodes in expansionFrontier, or
+    // any of their descendants, will have a higher score than node. Add it to the included
+    // set.
+    includedNodes.add(node.callNodeIndex);
+
+    // Skip children if we've reached max depth
+    if (node.depth >= maxDepth || !tree.hasChildren(node.callNodeIndex)) {
+      continue;
+    }
+
+    const childDepth = node.depth + 1;
+
+    const children = tree.getChildren(node.callNodeIndex);
+    // Limit children per node to prevent budget explosion
+    const childrenToExpand = children.slice(0, maxChildrenPerNode);
+
+    for (const childIndex of childrenToExpand) {
+      const childData = tree.getNodeData(childIndex);
+      const totalPercentage = childData.totalRelative * 100;
+      const childScore = computeInclusionScore(
+        totalPercentage,
+        childDepth,
+        scoringStrategy
+      );
+
+      const childNode: CollectionNode = {
+        callNodeIndex: childIndex,
+        depth: childDepth,
+      };
+
+      expansionFrontier.push(childNode, childScore);
+    }
+  }
+
+  return buildTreeStructure(
+    tree,
+    includedNodes,
+    functionMap,
+    threadIndexes,
+    libs
+  );
+}
+
+/**
+ * Build tree structure from the set of included nodes.
+ */
+function buildTreeStructure(
+  tree: CallTree,
+  includedNodes: Set<IndexIntoCallNodeTable>,
+  functionMap: FunctionMap,
+  threadIndexes: Set<ThreadIndex>,
+  libs: Lib[]
+): CallTreeNode {
+  // Get total sample count from the tree for percentage calculations
+  const totalSampleCount = tree.getTotal();
+
+  // Create virtual root
+  const rootNode: CallTreeNode = {
+    name: '',
+    nameWithLibrary: '',
+    totalSamples: totalSampleCount,
+    totalPercentage: 100,
+    selfSamples: 0,
+    selfPercentage: 0,
+    originalDepth: -1,
+    children: [],
+  };
+
+  const pendingNodes = [rootNode];
+
+  // Create tree nodes for all included nodes.
+  // Traverse the tree until we run out of pendingNodes.
+  while (true) {
+    const node = pendingNodes.pop();
+    if (node === undefined) {
+      break;
+    }
+
+    const childrenCallNodeIndexes =
+      node.callNodeIndex !== undefined
+        ?
tree.getChildren(node.callNodeIndex) + : tree.getRoots(); + const elidedChildren = []; + const childrenDepth = node.originalDepth + 1; + for (const callNodeIndex of childrenCallNodeIndexes) { + if (!includedNodes.has(callNodeIndex)) { + elidedChildren.push(callNodeIndex); + continue; + } + const childNodeData = tree.getNodeData(callNodeIndex); + const funcIndex = tree._callNodeInfo.funcForNode(callNodeIndex); + const totalPercentage = childNodeData.totalRelative * 100; + + // Format function name with library prefix + const nameWithLibrary = formatFunctionNameWithLibrary( + funcIndex, + tree._thread, + libs + ); + + const childNode: CallTreeNode = { + callNodeIndex, + functionHandle: functionMap.handleForFunction(threadIndexes, funcIndex), + functionIndex: funcIndex, + name: childNodeData.funcName, + nameWithLibrary, + totalSamples: childNodeData.total, + totalPercentage, + selfSamples: childNodeData.self, + selfPercentage: childNodeData.selfRelative * 100, + originalDepth: childrenDepth, + children: [], + }; + + node.children.push(childNode); + pendingNodes.push(childNode); + } + + // Create elision marker if there are any elided or unexpanded children + if (elidedChildren.length > 0) { + let combinedSamples = 0; + let maxSamples = 0; + + // Stats for elided children that were expanded + for (const childIdx of elidedChildren) { + const childData = tree.getNodeData(childIdx); + combinedSamples += childData.total; + maxSamples = Math.max(maxSamples, childData.total); + } + + const combinedRelative = combinedSamples / totalSampleCount; + const maxRelative = maxSamples / totalSampleCount; + node.childrenTruncated = { + count: elidedChildren.length, + combinedSamples, + combinedPercentage: combinedRelative * 100, + maxSamples, + maxPercentage: maxRelative * 100, + depth: childrenDepth, + }; + } + } + + return rootNode; +} diff --git a/src/profile-query/formatters/marker-info.ts b/src/profile-query/formatters/marker-info.ts new file mode 100644 index 0000000000..caa4a0f861 --- /dev/null +++ b/src/profile-query/formatters/marker-info.ts @@ -0,0 +1,1516 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { getSelectedThreadIndexes } from 'firefox-profiler/selectors/url-state'; +import { + getProfile, + getCategories, + getMarkerSchemaByName, + getStringTable, +} from 'firefox-profiler/selectors/profile'; +import { getThreadSelectors } from 'firefox-profiler/selectors/per-thread'; +import { + formatFromMarkerSchema, + getLabelGetter, +} from 'firefox-profiler/profile-logic/marker-schema'; +import { formatTimestamp } from 'firefox-profiler/utils/format-numbers'; +import { changeMarkersSearchString } from '../../actions/profile-view'; +import { + formatFunctionNameWithLibrary, + truncateFunctionName, +} from '../function-list'; +import type { Store } from '../../types/store'; +import type { ThreadMap } from '../thread-map'; +import type { MarkerMap } from '../marker-map'; +import type { + Marker, + MarkerIndex, + CategoryList, + Thread, + Lib, + IndexIntoStackTable, +} from 'firefox-profiler/types'; +import type { + MarkerStackResult, + MarkerInfoResult, + StackTraceData, + ThreadMarkersResult, + MarkerGroupData, + DurationStats, + RateStats, + MarkerFilterOptions, +} from '../types'; + +/** + * Aggregated statistics for a group of markers. 
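+ *
+ * Shape sketch with made-up values (the handle format is illustrative):
+ * @example
+ *   // {
+ *   //   markerName: 'DOMEvent', count: 120, isInterval: true,
+ *   //   durationStats: { min: 0.1, max: 12.5, avg: 1.3, median: 0.8, p95: 4.2, p99: 9.9 },
+ *   //   topMarkers: [{ handle: 'm-12', label: 'DOMEvent', start: 1234.5, duration: 12.5, hasStack: true }],
+ *   // }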
+ */ +interface MarkerTypeStats { + markerName: string; + count: number; + isInterval: boolean; + durationStats?: DurationStats; + rateStats?: RateStats; + topMarkers: Array<{ + handle: string; + label: string; + start: number; + duration?: number; + hasStack?: boolean; + }>; + subGroups?: MarkerGroup[]; // Sub-groups for multi-level grouping + subGroupKey?: string; // The key used for sub-grouping (e.g., "eventType" for auto-grouped fields) +} + +/** + * A group of markers with a common grouping key value. + */ +interface MarkerGroup { + groupName: string; + count: number; + isInterval: boolean; + durationStats?: DurationStats; + rateStats?: RateStats; + topMarkers: Array<{ + handle: string; + label: string; + start: number; + duration?: number; + hasStack?: boolean; + }>; + subGroups?: MarkerGroup[]; // Recursive sub-grouping +} + +/** + * A grouping key specifies how to group markers. + */ +type GroupingKey = + | 'type' // Group by marker type (data.type) + | 'name' // Group by marker name + | 'category' // Group by category name + | { field: string }; // Group by a specific field value + +/** + * Compute duration statistics for a list of markers. + * Only applies to interval markers (markers with an end time). + * Exported for testing. + */ +export function computeDurationStats( + markers: Marker[] +): DurationStats | undefined { + const durations = markers + .filter((m) => m.end !== null) + .map((m) => m.end! - m.start) + .sort((a, b) => a - b); + + if (durations.length === 0) { + return undefined; + } + + return { + min: durations[0], + max: durations[durations.length - 1], + avg: durations.reduce((a, b) => a + b, 0) / durations.length, + median: durations[Math.floor(durations.length / 2)], + p95: durations[Math.floor(durations.length * 0.95)], + p99: durations[Math.floor(durations.length * 0.99)], + }; +} + +/** + * Compute rate statistics for a list of markers (gaps between markers). + * Exported for testing. + */ +export function computeRateStats(markers: Marker[]): RateStats { + if (markers.length < 2) { + return { + markersPerSecond: 0, + minGap: 0, + avgGap: 0, + maxGap: 0, + }; + } + + const sorted = [...markers].sort((a, b) => a.start - b.start); + const gaps: number[] = []; + + for (let i = 1; i < sorted.length; i++) { + gaps.push(sorted[i].start - sorted[i - 1].start); + } + + const timeRange = sorted[sorted.length - 1].start - sorted[0].start; + // timeRange is in milliseconds, convert to seconds for rate + const markersPerSecond = + timeRange > 0 ? (markers.length / timeRange) * 1000 : 0; + + return { + markersPerSecond, + minGap: Math.min(...gaps), + avgGap: gaps.reduce((a, b) => a + b, 0) / gaps.length, + maxGap: Math.max(...gaps), + }; +} + +/** + * Format a duration in milliseconds to a human-readable string. + * Exported for testing. + */ +export function formatDuration(ms: number): string { + if (ms < 1) { + return `${(ms * 1000).toFixed(2)}μs`; + } else if (ms < 1000) { + return `${ms.toFixed(2)}ms`; + } + return `${(ms / 1000).toFixed(2)}s`; +} + +/** + * Apply all marker filters to a list of marker indexes. + * Returns the filtered list of marker indexes. 
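+ *
+ * Usage sketch (filter values are illustrative):
+ * @example
+ *   // Keep only interval markers of at least 16ms whose category name contains "Graphics":
+ *   const kept = applyMarkerFilters(markerIndexes, markers, categories, {
+ *     minDuration: 16,
+ *     category: 'Graphics',
+ *   });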
+ */ +function applyMarkerFilters( + markerIndexes: MarkerIndex[], + markers: Marker[], + categories: CategoryList, + filterOptions: MarkerFilterOptions +): MarkerIndex[] { + let filteredIndexes = markerIndexes; + + const { minDuration, maxDuration, category, hasStack, limit } = filterOptions; + + // Apply duration filtering if specified + if (minDuration !== undefined || maxDuration !== undefined) { + filteredIndexes = filteredIndexes.filter((markerIndex) => { + const marker = markers[markerIndex]; + + // Skip instant markers (they have no duration) + if (marker.end === null) { + return false; + } + + const duration = marker.end - marker.start; + + // Check min duration constraint + if (minDuration !== undefined && duration < minDuration) { + return false; + } + + // Check max duration constraint + if (maxDuration !== undefined && duration > maxDuration) { + return false; + } + + return true; + }); + } + + // Apply category filtering if specified + if (category !== undefined) { + const categoryLower = category.toLowerCase(); + filteredIndexes = filteredIndexes.filter((markerIndex) => { + const marker = markers[markerIndex]; + const categoryName = categories[marker.category]?.name ?? 'Unknown'; + return categoryName.toLowerCase().includes(categoryLower); + }); + } + + // Apply hasStack filtering if specified + if (hasStack) { + filteredIndexes = filteredIndexes.filter((markerIndex) => { + const marker = markers[markerIndex]; + return marker.data && 'cause' in marker.data && marker.data.cause; + }); + } + + // Apply limit if specified (after all filters) + if (limit !== undefined && filteredIndexes.length > limit) { + filteredIndexes = filteredIndexes.slice(0, limit); + } + + return filteredIndexes; +} + +/** + * Create a top markers array from a list of marker items. + * Returns up to 5 top markers, sorted by duration if applicable. + */ +function createTopMarkersArray( + items: Array<{ marker: Marker; index: MarkerIndex }>, + threadIndexes: Set, + markerMap: MarkerMap, + getMarkerLabel: (markerIndex: MarkerIndex) => string, + maxCount: number = 5 +): Array<{ + handle: string; + label: string; + start: number; + duration?: number; + hasStack?: boolean; +}> { + const hasEnd = items.some((item) => item.marker.end !== null); + + // Get top markers - sort by duration if interval markers, otherwise take first N + const sortedItems = hasEnd + ? [...items].sort( + (a, b) => + b.marker.end! - b.marker.start - (a.marker.end! - a.marker.start) + ) + : items.slice(0, maxCount); + + return sortedItems.slice(0, maxCount).map((item) => { + const handle = markerMap.handleForMarker(threadIndexes, item.index); + const label = getMarkerLabel(item.index); + const duration = + item.marker.end !== null + ? item.marker.end - item.marker.start + : undefined; + const hasStack = Boolean( + item.marker.data && 'cause' in item.marker.data && item.marker.data.cause + ); + return { + handle, + label: label || item.marker.name, + start: item.marker.start, + duration, + hasStack, + }; + }); +} + +/** + * Parse a groupBy string into an array of grouping keys. 
+ * Examples: + * "type" => ['type'] + * "type,name" => ['type', 'name'] + * "type,field:eventType" => ['type', {field: 'eventType'}] + */ +function parseGroupingKeys(groupBy: string): GroupingKey[] { + return groupBy.split(',').map((key) => { + const trimmed = key.trim(); + if (trimmed.startsWith('field:')) { + return { field: trimmed.substring(6) }; + } + return trimmed as 'type' | 'name' | 'category'; + }); +} + +/** + * Get the grouping value for a marker based on a grouping key. + */ +function getGroupingValue( + marker: Marker, + key: GroupingKey, + categories: CategoryList +): string { + if (key === 'type') { + return marker.data?.type ?? marker.name; + } else if (key === 'name') { + return marker.name; + } else if (key === 'category') { + return categories[marker.category]?.name ?? 'Unknown'; + } + // Field-based grouping + const fieldValue = (marker.data as any)?.[key.field]; + if (fieldValue === undefined || fieldValue === null) { + return '(no value)'; + } + return String(fieldValue); +} + +/** + * Analyze field variance for a group of markers to determine if sub-grouping would be useful. + * Returns the best field for grouping based on a scoring heuristic, or null if none found. + * + * Scoring heuristic: + * - Prefers fields with a moderate number of unique values (3-20 ideal) + * - Avoids fields with too many unique values (likely IDs or timestamps) + * - Avoids fields with too few unique values (not enough variety) + * - Prefers fields that appear in most markers (>80%) + * - Excludes fields that look like IDs (end with "ID" or "Id") + * - Prefers fields with semantic names (type, event, phase, status, etc.) + */ +function analyzeFieldVariance( + markers: Marker[] +): { field: string; variance: number } | null { + if (markers.length === 0) { + return null; + } + + // Get the marker schema for the first marker to find available fields + const firstMarkerType = markers[0].data?.type; + if (!firstMarkerType) { + return null; + } + + // Analyze each field to find the one with best score + const fieldScores: Array<{ + field: string; + score: number; + uniqueCount: number; + }> = []; + + // Get all field keys from the first marker's data + const sampleData = markers[0].data; + if (!sampleData) { + return null; + } + + const fieldKeys = Object.keys(sampleData).filter((key) => { + // Exclude metadata fields + if (key === 'type' || key === 'cause') { + return false; + } + // Exclude fields that look like IDs (end with "ID" or "Id") + if (key.endsWith('ID') || key.endsWith('Id')) { + return false; + } + return true; + }); + + for (const fieldKey of fieldKeys) { + const uniqueValues = new Set(); + let validCount = 0; + + for (const marker of markers) { + const value = (marker.data as any)?.[fieldKey]; + if (value !== undefined && value !== null) { + uniqueValues.add(String(value)); + validCount++; + } + } + + const uniqueCount = uniqueValues.size; + + // Skip fields that don't appear frequently enough + if (validCount < markers.length * 0.8) { + continue; + } + + // Skip fields with too few unique values (< 3) + if (uniqueCount < 3) { + continue; + } + + // Calculate score based on how good this field is for grouping + // Prefer fields with 3-20 unique values (ideal range) + let score = 0; + if (uniqueCount >= 3 && uniqueCount <= 20) { + // Ideal range: score 100 + score = 100; + } else if (uniqueCount > 20 && uniqueCount <= 50) { + // Acceptable range: score decreases with more unique values + score = 100 - (uniqueCount - 20) * 2; + } else if (uniqueCount > 50) { + // Too many unique 
values (likely IDs): very low score + score = 10; + } + + // Boost score for fields that appear in all markers + if (validCount === markers.length) { + score += 10; + } + + // Boost score for semantically meaningful field names + const semanticFields = [ + 'eventType', + 'phase', + 'status', + 'operation', + 'category', + ]; + if (semanticFields.includes(fieldKey)) { + score += 20; + } + + fieldScores.push({ field: fieldKey, score, uniqueCount }); + } + + // Return the field with highest score + if (fieldScores.length === 0) { + return null; + } + + fieldScores.sort((a, b) => b.score - a.score); + return { field: fieldScores[0].field, variance: fieldScores[0].score / 100 }; +} + +/** + * Format marker groups hierarchically and append to the lines array. + */ +function formatMarkerGroups( + lines: string[], + groups: MarkerGroup[], + indentLevel: number, + maxGroups: number = 15 +): void { + const indent = ' '.repeat(indentLevel); + const topGroups = groups.slice(0, maxGroups); + + for (const group of topGroups) { + let line = `${indent}${group.groupName.padEnd(25)} ${group.count.toString().padStart(5)} markers`; + + if (group.durationStats) { + const { min, avg, max } = group.durationStats; + line += ` (interval: min=${formatDuration(min)}, avg=${formatDuration(avg)}, max=${formatDuration(max)})`; + } else if (group.isInterval) { + line += ' (interval)'; + } else { + line += ' (instant)'; + } + + lines.push(line); + + // Recursively format sub-groups + if (group.subGroups && group.subGroups.length > 0) { + formatMarkerGroups(lines, group.subGroups, indentLevel + 1, 10); + } + } + + if (groups.length > maxGroups) { + lines.push(`${indent}... (${groups.length - maxGroups} more groups)`); + } +} + +/** + * Group markers by a sequence of grouping keys (multi-level grouping). + * Returns a hierarchical structure of groups. + */ +function groupMarkers( + markerGroup: Array<{ marker: Marker; index: MarkerIndex }>, + groupingKeys: GroupingKey[], + categories: CategoryList, + threadIndexes: Set, + markerMap: MarkerMap, + getMarkerLabel: (markerIndex: MarkerIndex) => string, + depth: number = 0 +): MarkerGroup[] { + if (groupingKeys.length === 0 || markerGroup.length === 0) { + return []; + } + + const [currentKey, ...remainingKeys] = groupingKeys; + const groups = new Map< + string, + Array<{ marker: Marker; index: MarkerIndex }> + >(); + + // Group by current key + for (const item of markerGroup) { + const groupValue = getGroupingValue(item.marker, currentKey, categories); + if (!groups.has(groupValue)) { + groups.set(groupValue, []); + } + groups.get(groupValue)!.push(item); + } + + const result: MarkerGroup[] = []; + for (const [groupName, items] of groups.entries()) { + const markers = items.map((item) => item.marker); + const hasEnd = markers.some((m) => m.end !== null); + const durationStats = hasEnd ? computeDurationStats(markers) : undefined; + const rateStats = computeRateStats(markers); + + // Get top markers + const topMarkers = createTopMarkersArray( + items, + threadIndexes, + markerMap, + getMarkerLabel + ); + + // Recursively group by remaining keys (limit depth to 3) + const subGroups = + remainingKeys.length > 0 && depth < 2 + ? 
groupMarkers( + items, + remainingKeys, + categories, + threadIndexes, + markerMap, + getMarkerLabel, + depth + 1 + ) + : undefined; + + result.push({ + groupName, + count: markers.length, + isInterval: hasEnd, + durationStats, + rateStats, + topMarkers, + subGroups, + }); + } + + // Sort by count descending + result.sort((a, b) => b.count - a.count); + + return result; +} + +/** + * Aggregate markers by type and compute statistics. + * Optionally applies auto-grouping or custom grouping. + */ +function aggregateMarkersByType( + markers: Marker[], + markerIndexes: MarkerIndex[], + threadIndexes: Set, + markerMap: MarkerMap, + getMarkerLabel: (markerIndex: MarkerIndex) => string, + categories: CategoryList, + autoGroup: boolean = false +): MarkerTypeStats[] { + // Convert Set to number if needed + const groups = new Map< + string, + Array<{ marker: Marker; index: MarkerIndex }> + >(); + + for (const markerIndex of markerIndexes) { + const marker = markers[markerIndex]; + const markerName = marker.name; + + if (!groups.has(markerName)) { + groups.set(markerName, []); + } + groups.get(markerName)!.push({ marker, index: markerIndex }); + } + + const stats: MarkerTypeStats[] = []; + + for (const [markerName, markerGroup] of groups.entries()) { + const markerList = markerGroup.map((g) => g.marker); + const hasEnd = markerList.some((m) => m.end !== null); + const durationStats = hasEnd ? computeDurationStats(markerList) : undefined; + const rateStats = computeRateStats(markerList); + + // Get top 5 markers by duration (or just first 5 for instant markers) + const topMarkers = createTopMarkersArray( + markerGroup, + threadIndexes, + markerMap, + getMarkerLabel + ); + + // Apply auto-grouping if enabled + let subGroups: MarkerGroup[] | undefined; + let subGroupKey: string | undefined; + if (autoGroup && markerList.length > 5) { + const fieldInfo = analyzeFieldVariance(markerList); + if (fieldInfo) { + // Sub-group by the field with highest variance + subGroups = groupMarkers( + markerGroup, + [{ field: fieldInfo.field }], + categories, + threadIndexes, + markerMap, + getMarkerLabel, + 1 + ); + subGroupKey = fieldInfo.field; + } + } + + stats.push({ + markerName: markerName, + count: markerList.length, + isInterval: hasEnd, + durationStats, + rateStats, + topMarkers, + subGroups, + subGroupKey, + }); + } + + // Sort by count descending + stats.sort((a, b) => b.count - a.count); + + return stats; +} + +/** + * Aggregate markers by category. + */ +function aggregateMarkersByCategory( + markers: Marker[], + markerIndexes: MarkerIndex[], + categories: CategoryList +): Array<{ categoryName: string; count: number; percentage: number }> { + const groups = new Map(); + + for (const markerIndex of markerIndexes) { + const marker = markers[markerIndex]; + const categoryName = categories[marker.category]?.name ?? 'Unknown'; + + groups.set(categoryName, (groups.get(categoryName) ?? 0) + 1); + } + + const total = markerIndexes.length; + const stats = Array.from(groups.entries()) + .map(([categoryName, count]) => ({ + categoryName, + count, + percentage: (count / total) * 100, + })) + .sort((a, b) => b.count - a.count); + + return stats; +} + +/** + * Format the marker listing for a thread. 
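+ *
+ * Usage sketch (the thread handle and filter values are illustrative):
+ * @example
+ *   const text = formatThreadMarkers(store, threadMap, markerMap, 't-3', {
+ *     searchString: 'GC',
+ *     minDuration: 1,
+ *     groupBy: 'type,field:eventType',
+ *   });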
+ */ +export function formatThreadMarkers( + store: Store, + threadMap: ThreadMap, + markerMap: MarkerMap, + threadHandle?: string, + filterOptions: MarkerFilterOptions = {} +): string { + // Apply marker search filter if provided + const searchString = filterOptions.searchString || ''; + if (searchString) { + store.dispatch(changeMarkersSearchString(searchString)); + } + + try { + // Get state after potentially dispatching the search action + const state = store.getState(); + const threadIndexes = + threadHandle !== undefined + ? threadMap.threadIndexesForHandle(threadHandle) + : getSelectedThreadIndexes(state); + + const threadSelectors = getThreadSelectors(threadIndexes); + const friendlyThreadName = threadSelectors.getFriendlyThreadName(state); + const fullMarkerList = threadSelectors.getFullMarkerList(state); + const categories = getCategories(state); + const markerSchemaByName = getMarkerSchemaByName(state); + const stringTable = getStringTable(state); + + // Get marker indexes - use search-filtered if search is active, otherwise all markers + const originalCount = + threadSelectors.getFullMarkerListIndexes(state).length; + let filteredIndexes = searchString + ? threadSelectors.getSearchFilteredMarkerIndexes(state) + : threadSelectors.getFullMarkerListIndexes(state); + + // Apply all marker filters + filteredIndexes = applyMarkerFilters( + filteredIndexes, + fullMarkerList, + categories, + filterOptions + ); + + // Get label getter for markers + const getMarkerLabel = getLabelGetter( + (markerIndex: MarkerIndex) => fullMarkerList[markerIndex], + getProfile(state).meta.markerSchema, + markerSchemaByName, + categories, + stringTable, + 'tableLabel' + ); + + const lines: string[] = []; + + // Generate thread handle for display + const displayThreadHandle = + threadHandle ?? threadMap.handleForThreadIndexes(threadIndexes); + + // Check if filters are active + const { minDuration, maxDuration, category, hasStack, limit } = + filterOptions; + const hasFilters = + !!searchString || + minDuration !== undefined || + maxDuration !== undefined || + category !== undefined || + hasStack || + limit !== undefined; + const filterSuffix = + hasFilters && filteredIndexes.length !== originalCount + ? 
` (filtered from ${originalCount})` + : ''; + + lines.push( + `Markers in thread ${displayThreadHandle} (${friendlyThreadName}) — ${filteredIndexes.length} markers${filterSuffix}\n` + ); + + if (filteredIndexes.length === 0) { + if (hasFilters) { + lines.push('No markers match the specified filters.'); + } else { + lines.push('No markers in this thread.'); + } + return lines.join('\n'); + } + + const { groupBy, autoGroup } = filterOptions; + + // Handle custom grouping if groupBy is specified + if (groupBy) { + const groupingKeys = parseGroupingKeys(groupBy); + const markerGroups: Array<{ marker: Marker; index: MarkerIndex }> = []; + for (const markerIndex of filteredIndexes) { + markerGroups.push({ + marker: fullMarkerList[markerIndex], + index: markerIndex, + }); + } + + const groups = groupMarkers( + markerGroups, + groupingKeys, + categories, + threadIndexes, + markerMap, + getMarkerLabel, + 0 + ); + + // Format and display hierarchical groups + formatMarkerGroups(lines, groups, 0); + } else { + // Default aggregation by type (with optional auto-grouping) + const typeStats = aggregateMarkersByType( + fullMarkerList, + filteredIndexes, + threadIndexes, + markerMap, + getMarkerLabel, + categories, + autoGroup || false + ); + + // Show top 15 marker names + lines.push('By Name (top 15):'); + const topTypes = typeStats.slice(0, 15); + for (const stats of topTypes) { + let line = ` ${stats.markerName.padEnd(25)} ${stats.count.toString().padStart(5)} markers`; + + if (stats.durationStats) { + const { min, avg, max } = stats.durationStats; + line += ` (interval: min=${formatDuration(min)}, avg=${formatDuration(avg)}, max=${formatDuration(max)})`; + } else { + line += ' (instant)'; + } + + lines.push(line); + + // Show top markers with handles (for easy inspection) + if (!stats.subGroups && stats.topMarkers.length > 0) { + const handleList = stats.topMarkers + .slice(0, 3) + .map((m) => { + const handleOnly = m.handle; + if (m.duration !== undefined) { + return `${handleOnly} (${formatDuration(m.duration)})`; + } + return handleOnly; + }) + .join(', '); + lines.push(` Examples: ${handleList}`); + } + + // Show sub-groups if present (from auto-grouping) + if (stats.subGroups && stats.subGroups.length > 0) { + if (stats.subGroupKey) { + lines.push(` Grouped by ${stats.subGroupKey}:`); + } + formatMarkerGroups(lines, stats.subGroups, 2); + } + } + + if (typeStats.length > 15) { + lines.push(` ... 
(${typeStats.length - 15} more types)`); + } + + lines.push(''); + + // Aggregate by category (using filtered indexes) + const categoryStats = aggregateMarkersByCategory( + fullMarkerList, + filteredIndexes, + categories + ); + + lines.push('By Category:'); + for (const stats of categoryStats) { + lines.push( + ` ${stats.categoryName.padEnd(25)} ${stats.count.toString().padStart(5)} markers (${stats.percentage.toFixed(1)}%)` + ); + } + + lines.push(''); + + // Frequency analysis for top types + lines.push('Frequency Analysis:'); + const topRateTypes = typeStats + .filter((s) => s.rateStats && s.rateStats.markersPerSecond > 0) + .slice(0, 5); + + for (const stats of topRateTypes) { + if (!stats.rateStats) continue; + const { markersPerSecond, minGap, avgGap, maxGap } = stats.rateStats; + lines.push( + ` ${stats.markerName}: ${markersPerSecond.toFixed(1)} markers/sec (interval: min=${formatDuration(minGap)}, avg=${formatDuration(avgGap)}, max=${formatDuration(maxGap)})` + ); + } + + lines.push(''); + } + + lines.push( + 'Use --search , --category , --min-duration , --max-duration , --has-stack, --limit , --group-by , or --auto-group to filter/group markers, or m- handles to inspect individual markers.' + ); + + return lines.join('\n'); + } finally { + // Always clear the search string to avoid affecting other queries + if (searchString) { + store.dispatch(changeMarkersSearchString('')); + } + } +} + +/** + * Collect thread markers data in structured format for JSON output. + */ +export function collectThreadMarkers( + store: Store, + threadMap: ThreadMap, + markerMap: MarkerMap, + threadHandle?: string, + filterOptions: MarkerFilterOptions = {} +): ThreadMarkersResult { + // Apply marker search filter if provided + const searchString = filterOptions.searchString || ''; + if (searchString) { + store.dispatch(changeMarkersSearchString(searchString)); + } + + try { + // Get state after potentially dispatching the search action + const state = store.getState(); + const threadIndexes = + threadHandle !== undefined + ? threadMap.threadIndexesForHandle(threadHandle) + : getSelectedThreadIndexes(state); + + const threadSelectors = getThreadSelectors(threadIndexes); + const friendlyThreadName = threadSelectors.getFriendlyThreadName(state); + const fullMarkerList = threadSelectors.getFullMarkerList(state); + const categories = getCategories(state); + const markerSchemaByName = getMarkerSchemaByName(state); + const stringTable = getStringTable(state); + + // Get marker indexes - use search-filtered if search is active, otherwise all markers + const originalCount = + threadSelectors.getFullMarkerListIndexes(state).length; + let filteredIndexes = searchString + ? threadSelectors.getSearchFilteredMarkerIndexes(state) + : threadSelectors.getFullMarkerListIndexes(state); + + // Apply all marker filters + filteredIndexes = applyMarkerFilters( + filteredIndexes, + fullMarkerList, + categories, + filterOptions + ); + + // Get label getter for markers + const getMarkerLabel = getLabelGetter( + (markerIndex: MarkerIndex) => fullMarkerList[markerIndex], + getProfile(state).meta.markerSchema, + markerSchemaByName, + categories, + stringTable, + 'tableLabel' + ); + + // Generate thread handle for display + const displayThreadHandle = + threadHandle ?? 
threadMap.handleForThreadIndexes(threadIndexes); + + const { groupBy, autoGroup } = filterOptions; + + // Handle custom grouping if groupBy is specified + let customGroups: MarkerGroupData[] | undefined; + if (groupBy) { + const groupingKeys = parseGroupingKeys(groupBy); + const markerGroups: Array<{ marker: Marker; index: MarkerIndex }> = []; + for (const markerIndex of filteredIndexes) { + markerGroups.push({ + marker: fullMarkerList[markerIndex], + index: markerIndex, + }); + } + + const groups = groupMarkers( + markerGroups, + groupingKeys, + categories, + threadIndexes, + markerMap, + getMarkerLabel, + 0 + ); + + // Add markerIndex to topMarkers in groups + customGroups = addMarkerIndexToGroups(groups); + } + + // Aggregate by type (with optional auto-grouping) + const typeStats = aggregateMarkersByType( + fullMarkerList, + filteredIndexes, + threadIndexes, + markerMap, + getMarkerLabel, + categories, + autoGroup || false + ); + + // Convert typeStats to include markerIndex + const byType = typeStats.map((stats) => ({ + markerName: stats.markerName, + count: stats.count, + isInterval: stats.isInterval, + durationStats: stats.durationStats, + rateStats: stats.rateStats, + topMarkers: stats.topMarkers.map((m) => ({ + handle: m.handle, + label: m.label, + start: m.start, + duration: m.duration, + hasStack: m.hasStack, + })), + subGroups: stats.subGroups + ? addMarkerIndexToGroups(stats.subGroups) + : undefined, + subGroupKey: stats.subGroupKey, + })); + + // Aggregate by category (using filtered indexes) + const categoryStats = aggregateMarkersByCategory( + fullMarkerList, + filteredIndexes, + categories + ); + + const byCategory = categoryStats.map((stats) => ({ + categoryName: stats.categoryName, + categoryIndex: categories.findIndex( + (cat) => cat?.name === stats.categoryName + ), + count: stats.count, + percentage: stats.percentage, + })); + + // Build filters object (only include if filters were applied) + const { minDuration, maxDuration, category, hasStack, limit } = + filterOptions; + const filters = + searchString || + minDuration !== undefined || + maxDuration !== undefined || + category !== undefined || + hasStack || + limit !== undefined + ? { + searchString: searchString || undefined, + minDuration, + maxDuration, + category, + hasStack, + limit, + } + : undefined; + + return { + type: 'thread-markers', + threadHandle: displayThreadHandle, + friendlyThreadName, + totalMarkerCount: originalCount, + filteredMarkerCount: filteredIndexes.length, + filters, + byType, + byCategory, + customGroups, + }; + } finally { + // Always clear the search string to avoid affecting other queries + if (searchString) { + store.dispatch(changeMarkersSearchString('')); + } + } +} + +/** + * Helper to add markerIndex to topMarkers in MarkerGroup arrays. + */ +function addMarkerIndexToGroups(groups: MarkerGroup[]): MarkerGroupData[] { + return groups.map((group) => ({ + groupName: group.groupName, + count: group.count, + isInterval: group.isInterval, + durationStats: group.durationStats, + rateStats: group.rateStats, + topMarkers: group.topMarkers.map((m) => ({ + handle: m.handle, + label: m.label, + start: m.start, + duration: m.duration, + hasStack: m.hasStack, + })), + subGroups: group.subGroups + ? addMarkerIndexToGroups(group.subGroups) + : undefined, + })); +} + +/** + * Format a marker's cause stack trace. + * Returns an array of formatted stack frame strings. 
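+ *
+ * Usage sketch (assumes a marker whose data carries a `cause` with a stack):
+ * @example
+ *   const lines = formatMarkerStack(cause.stack, thread, libs, 20);
+ *   // Each returned line looks like '  [1] <function name>'; when the stack is
+ *   // longer than maxFrames a final '... (N more frames)' line is appended.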
+ */ +function formatMarkerStack( + stackIndex: IndexIntoStackTable | null, + thread: Thread, + libs: Lib[], + maxFrames: number = 20 +): string[] { + if (stackIndex === null) { + return ['(no stack trace)']; + } + + const { stackTable, frameTable } = thread; + const frames: string[] = []; + + // Walk up the stack table to collect all frames + let currentStackIndex: IndexIntoStackTable | null = stackIndex; + while (currentStackIndex !== null) { + const frameIndex = stackTable.frame[currentStackIndex]; + const funcIndex = frameTable.func[frameIndex]; + const funcName = formatFunctionNameWithLibrary(funcIndex, thread, libs); + frames.push(funcName); + currentStackIndex = stackTable.prefix[currentStackIndex]; + } + + const lines: string[] = []; + const totalFrames = frames.length; + + if (totalFrames === 0) { + return ['(empty stack)']; + } + + // Show up to maxFrames, with ellipsis if there are more + const framesToShow = Math.min(totalFrames, maxFrames); + for (let i = 0; i < framesToShow; i++) { + const displayName = truncateFunctionName(frames[i], 100); + lines.push(` [${i + 1}] ${displayName}`); + } + + if (totalFrames > maxFrames) { + lines.push(` ... (${totalFrames - maxFrames} more frames)`); + } + + return lines; +} + +/** + * Collect stack trace data in structured format. + */ +function collectStackTrace( + stackIndex: IndexIntoStackTable | null, + thread: Thread, + libs: Lib[], + capturedAt?: number +): StackTraceData | null { + if (stackIndex === null) { + return null; + } + + const { stackTable, frameTable, funcTable, stringTable, resourceTable } = + thread; + const frames: StackTraceData['frames'] = []; + + // Walk up the stack table to collect all frames + let currentStackIndex: IndexIntoStackTable | null = stackIndex; + while (currentStackIndex !== null) { + const frameIndex = stackTable.frame[currentStackIndex]; + const funcIndex = frameTable.func[frameIndex]; + + // Get function name + const funcName = stringTable.getString(funcTable.name[funcIndex]); + + // Get library name if available + const resourceIndex = funcTable.resource[funcIndex]; + let library: string | undefined; + let nameWithLibrary = funcName; + + if (resourceIndex !== -1) { + const libIndex = resourceTable.lib[resourceIndex]; + if (libIndex !== null && libs) { + const lib = libs[libIndex]; + library = lib.name; + nameWithLibrary = `${library}!${funcName}`; + } else { + const resourceName = stringTable.getString( + resourceTable.name[resourceIndex] + ); + if (resourceName && resourceName !== funcName) { + nameWithLibrary = `${resourceName}!${funcName}`; + } + } + } + + frames.push({ + name: funcName, + nameWithLibrary, + library, + }); + + currentStackIndex = stackTable.prefix[currentStackIndex]; + } + + return { + frames, + truncated: false, + capturedAt, + }; +} + +/** + * Collect marker stack trace data in structured format. 
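+ *
+ * Usage sketch (the marker handle is illustrative):
+ * @example
+ *   const result = collectMarkerStack(store, markerMap, threadMap, 'm-42');
+ *   // result.stack is null when the marker has no cause stack.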
+ */ +export function collectMarkerStack( + store: Store, + markerMap: MarkerMap, + threadMap: ThreadMap, + markerHandle: string +): MarkerStackResult { + const state = store.getState(); + const { threadIndexes, markerIndex } = + markerMap.markerForHandle(markerHandle); + + const threadSelectors = getThreadSelectors(threadIndexes); + const friendlyThreadName = threadSelectors.getFriendlyThreadName(state); + const fullMarkerList = threadSelectors.getFullMarkerList(state); + const marker = fullMarkerList[markerIndex]; + + if (!marker) { + throw new Error(`Marker ${markerHandle} not found`); + } + + const threadHandleDisplay = threadMap.handleForThreadIndexes(threadIndexes); + const profile = getProfile(state); + const thread = threadSelectors.getFilteredThread(state); + const libs = profile.libs; + + // Check if marker has a stack trace + let stack: StackTraceData | null = null; + if (marker.data && 'cause' in marker.data && marker.data.cause) { + const cause = marker.data.cause; + stack = collectStackTrace(cause.stack, thread, libs, cause.time); + } + + return { + type: 'marker-stack', + markerHandle, + markerIndex, + threadHandle: threadHandleDisplay, + friendlyThreadName, + markerName: marker.name, + stack, + }; +} + +/** + * Collect detailed marker information in structured format. + */ +export function collectMarkerInfo( + store: Store, + markerMap: MarkerMap, + threadMap: ThreadMap, + markerHandle: string +): MarkerInfoResult { + const state = store.getState(); + const { threadIndexes, markerIndex } = + markerMap.markerForHandle(markerHandle); + + const threadSelectors = getThreadSelectors(threadIndexes); + const friendlyThreadName = threadSelectors.getFriendlyThreadName(state); + const fullMarkerList = threadSelectors.getFullMarkerList(state); + const marker = fullMarkerList[markerIndex]; + + if (!marker) { + throw new Error(`Marker ${markerHandle} not found`); + } + + const categories = getCategories(state); + const markerSchemaByName = getMarkerSchemaByName(state); + const stringTable = getStringTable(state); + const threadHandleDisplay = threadMap.handleForThreadIndexes(threadIndexes); + + // Get tooltip label + const getTooltipLabel = getLabelGetter( + (mi: MarkerIndex) => fullMarkerList[mi], + getProfile(state).meta.markerSchema, + markerSchemaByName, + categories, + stringTable, + 'tooltipLabel' + ); + const tooltipLabel = getTooltipLabel(markerIndex); + + // Collect marker fields + let fields: MarkerInfoResult['fields']; + let schemaInfo: MarkerInfoResult['schema']; + + if (marker.data) { + const schema = markerSchemaByName[marker.data.type]; + if (schema && schema.fields.length > 0) { + fields = []; + for (const field of schema.fields) { + if (field.hidden) { + continue; + } + + const value = (marker.data as any)[field.key]; + if (value !== undefined && value !== null) { + const formattedValue = formatFromMarkerSchema( + marker.data.type, + field.format, + value, + stringTable + ); + fields.push({ + key: field.key, + label: field.label || field.key, + value, + formattedValue, + }); + } + } + } + + // Include schema description if available + if (schema?.description) { + schemaInfo = { description: schema.description }; + } + } + + // Collect stack trace if available (truncated to 20 frames) + let stack: StackTraceData | undefined; + if (marker.data && 'cause' in marker.data && marker.data.cause) { + const cause = marker.data.cause; + const profile = getProfile(state); + const thread = threadSelectors.getFilteredThread(state); + const libs = profile.libs; + + const fullStack = 
collectStackTrace(cause.stack, thread, libs, cause.time); + if (fullStack && fullStack.frames.length > 0) { + // Truncate to 20 frames + const truncated = fullStack.frames.length > 20; + stack = { + frames: fullStack.frames.slice(0, 20), + truncated, + capturedAt: fullStack.capturedAt, + }; + } + } + + return { + type: 'marker-info', + markerHandle, + markerIndex, + threadHandle: threadHandleDisplay, + friendlyThreadName, + name: marker.name, + tooltipLabel: tooltipLabel || undefined, + markerType: marker.data?.type, + category: { + index: marker.category, + name: categories[marker.category]?.name ?? 'Unknown', + }, + start: marker.start, + end: marker.end, + duration: marker.end !== null ? marker.end - marker.start : undefined, + fields, + schema: schemaInfo, + stack, + }; +} + +/** + * Format a marker's full stack trace. + * Shows all frames without limit. + */ +export function formatMarkerStackFull( + store: Store, + markerMap: MarkerMap, + threadMap: ThreadMap, + markerHandle: string +): string { + const state = store.getState(); + const { threadIndexes, markerIndex } = + markerMap.markerForHandle(markerHandle); + + const threadSelectors = getThreadSelectors(threadIndexes); + const friendlyThreadName = threadSelectors.getFriendlyThreadName(state); + const fullMarkerList = threadSelectors.getFullMarkerList(state); + const marker = fullMarkerList[markerIndex]; + + if (!marker) { + throw new Error(`Marker ${markerHandle} not found`); + } + + const lines: string[] = []; + const threadHandleDisplay = threadMap.handleForThreadIndexes(threadIndexes); + + lines.push(`Stack trace for marker ${markerHandle}: ${marker.name}\n`); + lines.push(`Thread: ${threadHandleDisplay} (${friendlyThreadName})`); + + // Check if marker has a stack trace + if (!marker.data || !('cause' in marker.data) || !marker.data.cause) { + lines.push('\n(This marker has no stack trace)'); + return lines.join('\n'); + } + + const cause = marker.data.cause; + const profile = getProfile(state); + const thread = threadSelectors.getFilteredThread(state); + const libs = profile.libs; + + if (cause.time !== undefined) { + const causeTimeStr = formatTimestamp(cause.time); + lines.push(`Captured at: ${causeTimeStr}\n`); + } + + const stackLines = formatMarkerStack(cause.stack, thread, libs, Infinity); + lines.push(...stackLines); + + return lines.join('\n'); +} + +/** + * Format detailed information about a specific marker. 
+ */ +export function formatMarkerInfo( + store: Store, + markerMap: MarkerMap, + threadMap: ThreadMap, + markerHandle: string +): string { + const state = store.getState(); + const { threadIndexes, markerIndex } = + markerMap.markerForHandle(markerHandle); + + const threadSelectors = getThreadSelectors(threadIndexes); + const friendlyThreadName = threadSelectors.getFriendlyThreadName(state); + const fullMarkerList = threadSelectors.getFullMarkerList(state); + const marker = fullMarkerList[markerIndex]; + + if (!marker) { + throw new Error(`Marker ${markerHandle} not found`); + } + + const categories = getCategories(state); + const markerSchemaByName = getMarkerSchemaByName(state); + const stringTable = getStringTable(state); + + const lines: string[] = []; + const threadHandleDisplay = threadMap.handleForThreadIndexes(threadIndexes); + + // Get tooltip label + const getTooltipLabel = getLabelGetter( + (mi: MarkerIndex) => fullMarkerList[mi], + getProfile(state).meta.markerSchema, + markerSchemaByName, + categories, + stringTable, + 'tooltipLabel' + ); + const tooltipLabel = getTooltipLabel(markerIndex); + + lines.push( + `Marker ${markerHandle}: ${marker.name}${tooltipLabel ? ` - ${tooltipLabel}` : ''}\n` + ); + + // Basic info + lines.push(`Type: ${marker.data?.type ?? 'None'}`); + lines.push(`Category: ${categories[marker.category]?.name ?? 'Unknown'}`); + + const startStr = formatTimestamp(marker.start); + if (marker.end !== null) { + const endStr = formatTimestamp(marker.end); + const duration = marker.end - marker.start; + lines.push(`Time: ${startStr} - ${endStr} (${formatDuration(duration)})`); + } else { + lines.push(`Time: ${startStr} (instant)`); + } + + lines.push(`Thread: ${threadHandleDisplay} (${friendlyThreadName})`); + + // Marker data fields + if (marker.data) { + const schema = markerSchemaByName[marker.data.type]; + if (schema && schema.fields.length > 0) { + lines.push('\nFields:'); + for (const field of schema.fields) { + if (field.hidden) { + continue; + } + + const value = (marker.data as any)[field.key]; + if (value !== undefined && value !== null) { + const formattedValue = formatFromMarkerSchema( + marker.data.type, + field.format, + value, + stringTable + ); + lines.push(` ${field.label || field.key}: ${formattedValue}`); + } + } + } + + // Show description if available + if (schema?.description) { + lines.push(`\nDescription:`); + lines.push(` ${schema.description}`); + } + } + + // Show stack trace if available + if (marker.data && 'cause' in marker.data && marker.data.cause) { + const cause = marker.data.cause; + const profile = getProfile(state); + const thread = threadSelectors.getFilteredThread(state); + const libs = profile.libs; + + lines.push('\nStack trace:'); + if (cause.time !== undefined) { + const causeTimeStr = formatTimestamp(cause.time); + lines.push(` Captured at: ${causeTimeStr}`); + } + + const stackLines = formatMarkerStack(cause.stack, thread, libs, 20); + lines.push(...stackLines); + + if (stackLines.length > 21) { + lines.push( + `\nUse 'pq marker stack ${markerHandle}' for the full stack trace.` + ); + } + } + + return lines.join('\n'); +} diff --git a/src/profile-query/formatters/profile-info.ts b/src/profile-query/formatters/profile-info.ts new file mode 100644 index 0000000000..96cfdde538 --- /dev/null +++ b/src/profile-query/formatters/profile-info.ts @@ -0,0 +1,240 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { + getProfile, + getThreadCPUTimeMs, + getRangeFilteredCombinedThreadActivitySlices, +} from 'firefox-profiler/selectors/profile'; +import { getProfileNameWithDefault } from 'firefox-profiler/selectors/url-state'; +import { buildProcessThreadList } from '../process-thread-list'; +import { formatTimestamp } from '../../utils/format-numbers'; +import { printSliceTree, collectSliceTree } from '../cpu-activity'; +import type { Store } from '../../types/store'; +import type { ThreadInfo } from '../process-thread-list'; +import type { TimestampManager } from '../timestamps'; +import type { ThreadMap } from '../thread-map'; +import type { ProfileInfoResult } from '../types'; + +/** + * Collect profile information in structured format. + */ +export function collectProfileInfo( + store: Store, + timestampManager: TimestampManager, + threadMap: ThreadMap, + processIndexMap: Map +): ProfileInfoResult { + const state = store.getState(); + const profile = getProfile(state); + const profileName = getProfileNameWithDefault(state); + const processCount = new Set(profile.threads.map((t) => t.pid)).size; + const threadCPUTimeMs = getThreadCPUTimeMs(state); + + // Build thread info array + const threads: ThreadInfo[] = profile.threads.map((thread, index) => ({ + threadIndex: index, + name: thread.name, + cpuMs: threadCPUTimeMs ? threadCPUTimeMs[index] : 0, + pid: thread.pid, + })); + + // Build the process/thread list + const result = buildProcessThreadList(threads, processIndexMap); + + // Apply process names and timing from the profile + const processesData: ProfileInfoResult['processes'] = result.processes.map( + (processItem) => { + // Find a thread from this process to get the process name and timing + const threadFromProcess = profile.threads.find( + (t) => t.pid === processItem.pid + ); + + let processName = 'unknown'; + let startTime: number | undefined; + let startTimeName: string | undefined; + let endTime: number | null | undefined; + let endTimeName: string | null | undefined; + + if (threadFromProcess) { + processName = + threadFromProcess.processName || + threadFromProcess.processType || + 'unknown'; + startTime = threadFromProcess.processStartupTime; + if (startTime !== undefined) { + startTimeName = timestampManager.nameForTimestamp(startTime); + endTime = threadFromProcess.processShutdownTime; + if (endTime !== null && endTime !== undefined) { + endTimeName = timestampManager.nameForTimestamp(endTime); + } else { + endTimeName = null; + } + } + } + + return { + processIndex: processItem.processIndex, + pid: processItem.pid, + name: processName, + cpuMs: processItem.cpuMs, + startTime, + startTimeName, + endTime, + endTimeName, + threads: processItem.threads.map((thread) => ({ + threadIndex: thread.threadIndex, + threadHandle: threadMap.handleForThreadIndex(thread.threadIndex), + name: thread.name, + cpuMs: thread.cpuMs, + })), + remainingThreads: processItem.remainingThreads, + }; + } + ); + + // Collect CPU activity (respecting zoom) + const combinedCpuActivity = + getRangeFilteredCombinedThreadActivitySlices(state); + const cpuActivity = + combinedCpuActivity !== null + ? 
collectSliceTree(combinedCpuActivity, timestampManager) + : null; + + return { + type: 'profile-info', + name: profileName || 'Unknown Profile', + platform: profile.meta.oscpu || 'Unknown', + threadCount: profile.threads.length, + processCount, + processes: processesData, + remainingProcesses: result.remainingProcesses, + cpuActivity, + }; +} + +export function formatProfileInfo( + store: Store, + timestampManager: TimestampManager, + threadMap: ThreadMap, + processIndexMap: Map +): string { + const state = store.getState(); + const profile = getProfile(state); + const profileName = getProfileNameWithDefault(state); + const processCount = new Set(profile.threads.map((t) => t.pid)).size; + const threadCPUTimeMs = getThreadCPUTimeMs(state); + + // If no CPU time data, fall back to a simple message + if (threadCPUTimeMs === null) { + return `\ +Name: ${profileName} +Platform: ${profile.meta.oscpu} + +This profile contains ${profile.threads.length} threads across ${processCount} processes. +(CPU time information not available)`; + } + + // Build thread info array for the function + const threads: ThreadInfo[] = profile.threads.map((thread, index) => ({ + threadIndex: index, + name: thread.name, + cpuMs: threadCPUTimeMs[index], + pid: thread.pid, + })); + + // Use the testable function to build the process/thread list + const result = buildProcessThreadList(threads, processIndexMap); + + // Apply process names and timing from the profile + result.processes.forEach((processItem) => { + // Find a thread from this process to get the process name and timing + const threadFromProcess = profile.threads.find( + (t) => t.pid === processItem.pid + ); + if (threadFromProcess) { + processItem.name = + threadFromProcess.processName || + threadFromProcess.processType || + 'unknown'; + // Add process start/end times (same for all threads in a process) + processItem.startTime = threadFromProcess.processStartupTime; + processItem.endTime = threadFromProcess.processShutdownTime; + } + }); + + // Build the output lines + const lines: string[] = []; + result.processes.forEach( + ({ + pid, + name, + cpuMs, + processIndex, + threads, + remainingThreads, + startTime, + endTime, + }) => { + // Format process timing information + let timingInfo = ''; + if (startTime !== undefined) { + const startName = timestampManager.nameForTimestamp(startTime); + if (endTime !== null && endTime !== undefined) { + const endName = timestampManager.nameForTimestamp(endTime); + timingInfo = ` [${startName} → ${endName}]`; + } else { + timingInfo = ` [${startName} → end]`; + } + } + + lines.push( + ` p-${processIndex}: ${name} [pid ${pid}]${timingInfo} - ${formatTimestamp(cpuMs)}` + ); + + threads.forEach( + ({ threadIndex, name: threadName, cpuMs: threadCpuMs }) => { + const threadHandle = threadMap.handleForThreadIndex(threadIndex); + lines.push( + ` ${threadHandle}: ${threadName} - ${formatTimestamp(threadCpuMs)}` + ); + } + ); + + // Add summary line for remaining threads + if (remainingThreads) { + lines.push( + ` + ${remainingThreads.count} more threads with combined CPU time ${formatTimestamp(remainingThreads.combinedCpuMs)} and max CPU time ${formatTimestamp(remainingThreads.maxCpuMs)}` + ); + } + } + ); + + // Add summary line for remaining processes + if (result.remainingProcesses) { + lines.push( + ` + ${result.remainingProcesses.count} more processes with combined CPU time ${formatTimestamp(result.remainingProcesses.combinedCpuMs)} and max CPU time ${formatTimestamp(result.remainingProcesses.maxCpuMs)}` + ); + } + + const 
combinedCpuActivity = + getRangeFilteredCombinedThreadActivitySlices(state); + const cpuActivityLines = + combinedCpuActivity !== null + ? printSliceTree(combinedCpuActivity, timestampManager) + : []; + + return `\ +Name: ${profileName} +Platform: ${profile.meta.oscpu} + +This profile contains ${profile.threads.length} threads across ${processCount} processes. + +Top processes and threads by CPU usage: +${lines.join('\n')} + +CPU activity over time: +${cpuActivityLines.join('\n')} +`; +} diff --git a/src/profile-query/formatters/thread-info.ts b/src/profile-query/formatters/thread-info.ts new file mode 100644 index 0000000000..e0d8ee3289 --- /dev/null +++ b/src/profile-query/formatters/thread-info.ts @@ -0,0 +1,492 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { + getSelectedThreadIndexes, + getAllCommittedRanges, +} from 'firefox-profiler/selectors/url-state'; +import { + getCategories, + getDefaultCategory, + getProfile, +} from 'firefox-profiler/selectors/profile'; +import { printSliceTree, collectSliceTree } from '../cpu-activity'; +import { getThreadSelectors } from 'firefox-profiler/selectors/per-thread'; +import type { + ThreadInfoResult, + ThreadSamplesResult, + ThreadSamplesTopDownResult, + ThreadSamplesBottomUpResult, + ThreadFunctionsResult, + FunctionFilterOptions, + TopFunctionInfo, +} from '../types'; +import { + extractFunctionData, + formatFunctionNameWithLibrary, +} from '../function-list'; +import { collectCallTree } from './call-tree'; +import type { CallTreeCollectionOptions } from './call-tree'; +import { + computeCallTreeTimings, + getCallTree, + computeCallNodeSelfAndSummary, +} from 'firefox-profiler/profile-logic/call-tree'; +import { getInvertedCallNodeInfo } from 'firefox-profiler/profile-logic/profile-data'; +import type { Store } from '../../types/store'; +import type { TimestampManager } from '../timestamps'; +import type { ThreadMap } from '../thread-map'; +import type { FunctionMap } from '../function-map'; +import type { CallNodePath } from 'firefox-profiler/types'; + +export function formatThreadInfo( + store: Store, + timestampManager: TimestampManager, + threadMap: ThreadMap, + threadHandle?: string +): string { + const state = store.getState(); + const threadIndexes = + threadHandle !== undefined + ? threadMap.threadIndexesForHandle(threadHandle) + : getSelectedThreadIndexes(state); + const threadSelectors = getThreadSelectors(threadIndexes); + const thread = threadSelectors.getRawThread(state); + const friendlyThreadName = threadSelectors.getFriendlyThreadName(state); + const cptuActivity = threadSelectors.getRangeFilteredActivitySlices(state); + const cpuActivityLines = + cptuActivity !== null ? printSliceTree(cptuActivity, timestampManager) : []; + + return `\ +Name: ${friendlyThreadName} +Created at: ${timestampManager.nameForTimestamp(thread.registerTime)} +Ended at: ${thread.unregisterTime !== null ? timestampManager.nameForTimestamp(thread.unregisterTime) : 'still alive at end of recording'} + +This thread contains ${thread.samples.length} samples and ${thread.markers.length} markers. + +CPU activity over time: +${cpuActivityLines.join('\n')} +`; +} + +/** + * Collect thread info as structured data. 
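+ *
+ * Usage sketch (the thread handle is illustrative):
+ * @example
+ *   const info = collectThreadInfo(store, timestampManager, threadMap, 't-0');
+ *   console.log(
+ *     `${info.friendlyName}: ${info.sampleCount} samples, ${info.markerCount} markers`
+ *   );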
+ */ +export function collectThreadInfo( + store: Store, + timestampManager: TimestampManager, + threadMap: ThreadMap, + threadHandle?: string +): ThreadInfoResult { + const state = store.getState(); + const threadIndexes = + threadHandle !== undefined + ? threadMap.threadIndexesForHandle(threadHandle) + : getSelectedThreadIndexes(state); + const threadSelectors = getThreadSelectors(threadIndexes); + const thread = threadSelectors.getRawThread(state); + const friendlyThreadName = threadSelectors.getFriendlyThreadName(state); + const cpuActivitySlices = + threadSelectors.getRangeFilteredActivitySlices(state); + const cpuActivity = + cpuActivitySlices !== null + ? collectSliceTree(cpuActivitySlices, timestampManager) + : null; + + const actualThreadHandle = + threadHandle ?? threadMap.handleForThreadIndexes(threadIndexes); + + return { + type: 'thread-info', + threadHandle: actualThreadHandle, + name: thread.name, + friendlyName: friendlyThreadName, + createdAt: thread.registerTime, + createdAtName: timestampManager.nameForTimestamp(thread.registerTime), + endedAt: thread.unregisterTime, + endedAtName: + thread.unregisterTime !== null + ? timestampManager.nameForTimestamp(thread.unregisterTime) + : null, + sampleCount: thread.samples.length, + markerCount: thread.markers.length, + cpuActivity, + }; +} + +/** + * Collect thread samples data in structured format. + */ +export function collectThreadSamples( + store: Store, + threadMap: ThreadMap, + functionMap: FunctionMap, + threadHandle?: string +): ThreadSamplesResult { + const state = store.getState(); + const threadIndexes = + threadHandle !== undefined + ? threadMap.threadIndexesForHandle(threadHandle) + : getSelectedThreadIndexes(state); + const threadHandleDisplay = threadMap.handleForThreadIndexes(threadIndexes); + const threadSelectors = getThreadSelectors(threadIndexes); + const friendlyThreadName = threadSelectors.getFriendlyThreadName(state); + const thread = threadSelectors.getFilteredThread(state); + const libs = getProfile(state).libs; + + // Get call trees for analysis + const functionListTree = threadSelectors.getFunctionListTree(state); + const callTree = threadSelectors.getCallTree(state); + + // Extract function data + const functions = extractFunctionData(functionListTree, thread, libs); + + // Sort by total and take top 50 + const sortedByTotal = functions + .slice() + .sort((a, b) => b.total - a.total) + .slice(0, 50); + + // Sort by self and take top 50 + const sortedBySelf = functions + .slice() + .sort((a, b) => b.self - a.self) + .slice(0, 50); + + // Convert top functions to structured format + const topFunctionsByTotal: TopFunctionInfo[] = sortedByTotal.map((func) => ({ + functionHandle: functionMap.handleForFunction( + threadIndexes, + func.funcIndex + ), + functionIndex: func.funcIndex, + name: func.funcName, + nameWithLibrary: func.funcName, // Already includes library from extractFunctionData + totalSamples: func.total, + totalPercentage: func.totalRelative * 100, + selfSamples: func.self, + selfPercentage: func.selfRelative * 100, + library: undefined, // Could extract from funcName if needed + })); + + const topFunctionsBySelf: TopFunctionInfo[] = sortedBySelf.map((func) => ({ + functionHandle: functionMap.handleForFunction( + threadIndexes, + func.funcIndex + ), + functionIndex: func.funcIndex, + name: func.funcName, + nameWithLibrary: func.funcName, // Already includes library from extractFunctionData + totalSamples: func.total, + totalPercentage: func.totalRelative * 100, + selfSamples: func.self, + 
selfPercentage: func.selfRelative * 100, + library: undefined, // Could extract from funcName if needed + })); + + // Create a map from funcIndex to function data for quick lookup + const funcMap = new Map(functions.map((f) => [f.funcIndex, f])); + + // Collect heaviest stack + const roots = callTree.getRoots(); + let heaviestStack: ThreadSamplesResult['heaviestStack'] = { + selfSamples: 0, + frameCount: 0, + frames: [], + }; + + if (roots.length > 0) { + const heaviestPath: CallNodePath = + callTree._internal.findHeaviestPathInSubtree(roots[0]); + + if (heaviestPath.length > 0) { + const callNodeInfo = callTree._callNodeInfo; + const leafNodeIndex = callNodeInfo.getCallNodeIndexFromPath(heaviestPath); + + if (leafNodeIndex !== null) { + const leafNodeData = callTree.getNodeData(leafNodeIndex); + + heaviestStack = { + selfSamples: leafNodeData.self, + frameCount: heaviestPath.length, + frames: heaviestPath.map((funcIndex) => { + const funcName = formatFunctionNameWithLibrary( + funcIndex, + thread, + libs + ); + const funcData = funcMap.get(funcIndex); + return { + funcIndex, + name: funcName, + nameWithLibrary: funcName, + totalSamples: funcData?.total ?? 0, + totalPercentage: (funcData?.totalRelative ?? 0) * 100, + selfSamples: funcData?.self ?? 0, + selfPercentage: (funcData?.selfRelative ?? 0) * 100, + }; + }), + }; + } + } + } + + return { + type: 'thread-samples', + threadHandle: threadHandleDisplay, + friendlyThreadName, + topFunctionsByTotal, + topFunctionsBySelf, + heaviestStack, + }; +} + +/** + * Collect thread samples bottom-up data in structured format. + * Shows the inverted call tree (callers of hot functions). + */ +export function collectThreadSamplesBottomUp( + store: Store, + threadMap: ThreadMap, + functionMap: FunctionMap, + threadHandle?: string, + callTreeOptions?: CallTreeCollectionOptions +): ThreadSamplesBottomUpResult { + const state = store.getState(); + const threadIndexes = + threadHandle !== undefined + ? 
threadMap.threadIndexesForHandle(threadHandle) + : getSelectedThreadIndexes(state); + const threadHandleDisplay = threadMap.handleForThreadIndexes(threadIndexes); + const threadSelectors = getThreadSelectors(threadIndexes); + const friendlyThreadName = threadSelectors.getFriendlyThreadName(state); + const thread = threadSelectors.getFilteredThread(state); + + // Collect inverted call tree + let invertedCallTree = null; + try { + const callNodeInfo = threadSelectors.getCallNodeInfo(state); + const categories = getCategories(state); + const defaultCategory = getDefaultCategory(state); + const weightType = threadSelectors.getWeightTypeForCallTree(state); + + const samples = threadSelectors.getPreviewFilteredCtssSamples(state); + const sampleIndexToCallNodeIndex = + threadSelectors.getSampleIndexToNonInvertedCallNodeIndexForFilteredThread( + state + ); + + const callNodeSelfAndSummary = computeCallNodeSelfAndSummary( + samples, + sampleIndexToCallNodeIndex, + callNodeInfo.getCallNodeTable().length + ); + + const invertedCallNodeInfo = getInvertedCallNodeInfo( + callNodeInfo, + defaultCategory, + thread.funcTable.length + ); + + const invertedTimings = computeCallTreeTimings( + invertedCallNodeInfo, + callNodeSelfAndSummary + ); + + const invertedTree = getCallTree( + thread, + invertedCallNodeInfo, + categories, + invertedTimings, + weightType + ); + + // Note: Bottom-up tree uses the same threadIndexes to generate function handles + const libs = getProfile(state).libs; + invertedCallTree = collectCallTree( + invertedTree, + functionMap, + threadIndexes, + libs, + callTreeOptions + ); + } catch (_e) { + // Inverted tree creation failed, leave as null + } + + return { + type: 'thread-samples-bottom-up', + threadHandle: threadHandleDisplay, + friendlyThreadName, + invertedCallTree, + }; +} + +/** + * Collect thread samples top-down data in structured format. + * Shows the regular call tree (top-down view of hot paths). + */ +export function collectThreadSamplesTopDown( + store: Store, + threadMap: ThreadMap, + functionMap: FunctionMap, + threadHandle?: string, + callTreeOptions?: CallTreeCollectionOptions +): ThreadSamplesTopDownResult { + const state = store.getState(); + const threadIndexes = + threadHandle !== undefined + ? threadMap.threadIndexesForHandle(threadHandle) + : getSelectedThreadIndexes(state); + const threadHandleDisplay = threadMap.handleForThreadIndexes(threadIndexes); + const threadSelectors = getThreadSelectors(threadIndexes); + const friendlyThreadName = threadSelectors.getFriendlyThreadName(state); + const callTree = threadSelectors.getCallTree(state); + const libs = getProfile(state).libs; + + // Collect regular call tree + const regularCallTree = collectCallTree( + callTree, + functionMap, + threadIndexes, + libs, + callTreeOptions + ); + + return { + type: 'thread-samples-top-down', + threadHandle: threadHandleDisplay, + friendlyThreadName, + regularCallTree, + }; +} + +/** + * Collect thread functions data in structured format. + * Lists all functions with their CPU percentages, supporting search and filtering. + */ +export function collectThreadFunctions( + store: Store, + threadMap: ThreadMap, + functionMap: FunctionMap, + threadHandle?: string, + filterOptions?: FunctionFilterOptions +): ThreadFunctionsResult { + const state = store.getState(); + const threadIndexes = + threadHandle !== undefined + ? 
threadMap.threadIndexesForHandle(threadHandle) + : getSelectedThreadIndexes(state); + const threadHandleDisplay = threadMap.handleForThreadIndexes(threadIndexes); + const threadSelectors = getThreadSelectors(threadIndexes); + const friendlyThreadName = threadSelectors.getFriendlyThreadName(state); + const thread = threadSelectors.getFilteredThread(state); + const libs = getProfile(state).libs; + + // Get function list tree + const functionListTree = threadSelectors.getFunctionListTree(state); + + // Extract function data + const allFunctions = extractFunctionData(functionListTree, thread, libs); + const totalFunctionCount = allFunctions.length; + + // Check if we're zoomed (have committed ranges) + const committedRanges = getAllCommittedRanges(state); + const isZoomed = committedRanges.length > 0; + + // If zoomed, get full profile total samples for percentage calculation + // We can compute this from any function in allFunctions that has a non-zero totalRelative + // Formula: fullTotalSamples = total / totalRelative + // But since totalRelative is based on current view, we need the UNzoomed totalRelative + // Simpler approach: The raw thread has all samples - count them directly + let fullProfileTotalSamples: number | null = null; + if (isZoomed) { + // Get the unfiltered thread to count total samples + const rawThread = threadSelectors.getRawThread(state); + fullProfileTotalSamples = rawThread.samples.length; + } + + // Apply filters + let filteredFunctions = allFunctions; + + // Filter by search string (case-insensitive substring match) + if (filterOptions?.searchString) { + const searchLower = filterOptions.searchString.toLowerCase(); + filteredFunctions = filteredFunctions.filter((func) => + func.funcName.toLowerCase().includes(searchLower) + ); + } + + // Filter by minimum self time percentage + if (filterOptions?.minSelf !== undefined) { + const minSelfFraction = filterOptions.minSelf / 100; + filteredFunctions = filteredFunctions.filter( + (func) => func.selfRelative >= minSelfFraction + ); + } + + // Sort by self time (descending) + filteredFunctions.sort((a, b) => b.self - a.self); + + // Apply limit + const limit = filterOptions?.limit ?? filteredFunctions.length; + const limitedFunctions = filteredFunctions.slice(0, limit); + + // Convert to structured format + const functions: ThreadFunctionsResult['functions'] = limitedFunctions.map( + (func) => { + const nameWithLibrary = func.funcName; + // Extract library name if present (format: "library!function") + const bangIndex = nameWithLibrary.indexOf('!'); + const library = + bangIndex !== -1 ? nameWithLibrary.substring(0, bangIndex) : undefined; + const name = + bangIndex !== -1 + ? 
nameWithLibrary.substring(bangIndex + 1) + : nameWithLibrary; + + // Get full profile percentages if zoomed + let fullSelfPercentage: number | undefined; + let fullTotalPercentage: number | undefined; + if (fullProfileTotalSamples !== null) { + // Calculate percentages relative to full profile + fullSelfPercentage = (func.self / fullProfileTotalSamples) * 100; + fullTotalPercentage = (func.total / fullProfileTotalSamples) * 100; + } + + return { + functionHandle: functionMap.handleForFunction( + threadIndexes, + func.funcIndex + ), + functionIndex: func.funcIndex, + name, + nameWithLibrary, + selfSamples: func.self, + selfPercentage: func.selfRelative * 100, + totalSamples: func.total, + totalPercentage: func.totalRelative * 100, + library, + fullSelfPercentage, + fullTotalPercentage, + }; + } + ); + + return { + type: 'thread-functions', + threadHandle: threadHandleDisplay, + friendlyThreadName, + totalFunctionCount, + filteredFunctionCount: filteredFunctions.length, + filters: filterOptions + ? { + searchString: filterOptions.searchString, + minSelf: filterOptions.minSelf, + limit: filterOptions.limit, + } + : undefined, + functions, + }; +} diff --git a/src/profile-query/function-list.ts b/src/profile-query/function-list.ts new file mode 100644 index 0000000000..7d3a2ce65b --- /dev/null +++ b/src/profile-query/function-list.ts @@ -0,0 +1,525 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import type { Thread, Lib, ThreadIndex } from 'firefox-profiler/types'; +import type { FunctionMap } from './function-map'; + +export type FunctionData = { + funcName: string; + funcIndex: number; + total: number; + self: number; + totalRelative: number; + selfRelative: number; +}; + +export type FunctionListStats = { + omittedCount: number; + maxTotal: number; + maxSelf: number; + sumSelf: number; +}; + +export type FormattedFunctionList = { + title: string; + lines: string[]; + stats: FunctionListStats | null; +}; + +/** + * A tree node representing a segment of a function name that can be truncated. + */ +type TruncNode = { + type: 'text' | 'nested'; + text: string; // For text nodes, the actual text. For nested, empty. + openBracket?: string; // '(' or '<' for nested nodes + closeBracket?: string; // ')' or '>' for nested nodes + children: TruncNode[]; // Child nodes (for nested nodes) +}; + +/** + * Parse a function name into a tree structure. + * Each nested section (templates, parameters) becomes a tree node that can be collapsed. + */ +function parseFunctionNameTree(name: string): TruncNode[] { + const stack: TruncNode[][] = [[]]; // Stack of node lists + let currentText = ''; + + const flushText = () => { + if (currentText) { + stack[stack.length - 1].push({ + type: 'text', + text: currentText, + children: [], + }); + currentText = ''; + } + }; + + for (let i = 0; i < name.length; i++) { + const char = name[i]; + + if (char === '<' || char === '(') { + flushText(); + + // Create a new nested node + const nestedNode: TruncNode = { + type: 'nested', + text: '', + openBracket: char, + closeBracket: char === '<' ? 
'>' : ')', + children: [], + }; + + // Add to current level + stack[stack.length - 1].push(nestedNode); + + // Push a new level for the nested content + stack.push(nestedNode.children); + } else if (char === '>' || char === ')') { + flushText(); + + // Pop back to parent level + if (stack.length > 1) { + stack.pop(); + } + } else { + currentText += char; + } + } + + flushText(); + return stack[0]; +} + +/** + * Render a tree of nodes to a string. + */ +function renderTree(nodes: TruncNode[]): string { + return nodes + .map((node) => { + if (node.type === 'text') { + return node.text; + } + // Nested node + const inner = renderTree(node.children); + return `${node.openBracket}${inner}${node.closeBracket}`; + }) + .join(''); +} + +/** + * Calculate the length of a tree if fully rendered. + */ +function treeLength(nodes: TruncNode[]): number { + return nodes.reduce((len, node) => { + if (node.type === 'text') { + return len + node.text.length; + } + // Nested: brackets + children + return len + 2 + treeLength(node.children); // 2 for open/close brackets + }, 0); +} + +/** + * Truncate a tree to fit within maxLength characters. + * Collapses nested nodes to `<...>` or `(...)` when needed. + */ +function truncateTree(nodes: TruncNode[], maxLength: number): string { + if (treeLength(nodes) <= maxLength) { + return renderTree(nodes); + } + + let result = ''; + + for (const node of nodes) { + const spaceLeft = maxLength - result.length; + if (spaceLeft <= 0) { + break; + } + + if (node.type === 'text') { + if (node.text.length <= spaceLeft) { + result += node.text; + } else { + // Truncate text, trying to break at :: for namespaces + const parts = node.text.split('::'); + for (let i = 0; i < parts.length; i++) { + const part = parts[i] + (i < parts.length - 1 ? '::' : ''); + if (result.length + part.length <= maxLength) { + result += part; + } else { + break; + } + } + break; + } + } else { + // Nested node + const fullNested = renderTree(node.children); + const fullWithBrackets = `${node.openBracket}${fullNested}${node.closeBracket}`; + const collapsed = `${node.openBracket}...${node.closeBracket}`; + + if (fullWithBrackets.length <= spaceLeft) { + // Full content fits + result += fullWithBrackets; + } else if (collapsed.length <= spaceLeft) { + // Try to recursively truncate children + const availableForChildren = spaceLeft - 2; // 2 for brackets + const truncatedChildren = truncateTree( + node.children, + availableForChildren + ); + + if (truncatedChildren.length <= availableForChildren) { + result += `${node.openBracket}${truncatedChildren}${node.closeBracket}`; + } else { + // Just collapse + result += collapsed; + } + } else { + // Can't even fit collapsed version + break; + } + } + } + + return result; +} + +/** + * Find the last top-level `::` separator in a tree (not inside any nesting). + * Returns the index in the nodes array and position within that text node. + */ +function findLastTopLevelSeparator( + nodes: TruncNode[] +): { nodeIndex: number; position: number } | null { + for (let i = nodes.length - 1; i >= 0; i--) { + const node = nodes[i]; + if (node.type === 'text') { + const lastColons = node.text.lastIndexOf('::'); + if (lastColons !== -1) { + return { nodeIndex: i, position: lastColons }; + } + } + } + return null; +} + +/** + * Intelligently truncate a function name, preserving context and function name. + * Handles library prefixes (e.g., "nvoglv64.dll!functionName") by processing + * only the function name portion. 
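+ *
+ * Rough illustration (made-up name, not taken from a real profile): truncating
+ * 'alpha::beta::gamma(delta, epsilon)' to 24 characters keeps the trailing
+ * function name, drops the middle of the namespace prefix and collapses the
+ * parameter list, yielding something like 'alpha::gamma()'.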
+ */ +export function truncateFunctionName( + functionName: string, + maxLength: number +): string { + if (functionName.length <= maxLength) { + return functionName; + } + + // Check if there's a library prefix (e.g., "nvoglv64.dll!functionName") + const bangIndex = functionName.indexOf('!'); + let libraryPrefix = ''; + let funcPart = functionName; + + if (bangIndex !== -1) { + libraryPrefix = functionName.substring(0, bangIndex + 1); // Include the '!' + funcPart = functionName.substring(bangIndex + 1); + + // Calculate space available for function name after prefix + const availableForFunc = maxLength - libraryPrefix.length; + + if (availableForFunc <= 10) { + // Library prefix is too long, fall back to simple truncation + return functionName.substring(0, maxLength - 3) + '...'; + } + + // If the function part fits, return it + if (funcPart.length <= availableForFunc) { + return functionName; + } + + // Otherwise, truncate the function part smartly + maxLength = availableForFunc; + } + + // Parse into tree + const tree = parseFunctionNameTree(funcPart); + + // Find the last top-level :: separator to split prefix/suffix + const separator = findLastTopLevelSeparator(tree); + + if (separator === null) { + // No namespace separator - just truncate the whole thing + return libraryPrefix + truncateTree(tree, maxLength); + } + + // Split into prefix (context) and suffix (function name) + const { nodeIndex, position } = separator; + const sepNode = tree[nodeIndex]; + + // Build prefix nodes + const prefixNodes: TruncNode[] = tree.slice(0, nodeIndex); + if (position > 0) { + // Include part of the separator node before :: + prefixNodes.push({ + type: 'text', + text: sepNode.text.substring(0, position + 2), // Include the :: + children: [], + }); + } else { + prefixNodes.push({ + type: 'text', + text: '::', + children: [], + }); + } + + // Build suffix nodes + const suffixNodes: TruncNode[] = []; + const remainingText = sepNode.text.substring(position + 2); + if (remainingText) { + suffixNodes.push({ + type: 'text', + text: remainingText, + children: [], + }); + } + suffixNodes.push(...tree.slice(nodeIndex + 1)); + + const prefixLen = treeLength(prefixNodes); + const suffixLen = treeLength(suffixNodes); + + // Check if both fit + if (prefixLen + suffixLen <= maxLength) { + return libraryPrefix + funcPart; + } + + // Allocate space: prioritize suffix (function name), up to 70% + const maxSuffixLen = Math.floor(maxLength * 0.7); + let suffixAlloc: number; + let prefixAlloc: number; + + if (suffixLen <= maxSuffixLen) { + // Suffix fits fully, give rest to prefix + suffixAlloc = suffixLen; + prefixAlloc = maxLength - suffixLen; + } else { + // Both need truncation - give at least 30% to prefix for context + prefixAlloc = Math.floor(maxLength * 0.3); + suffixAlloc = maxLength - prefixAlloc; + } + + const truncatedPrefix = truncateTree(prefixNodes, prefixAlloc); + const truncatedSuffix = truncateTree(suffixNodes, suffixAlloc); + + return libraryPrefix + truncatedPrefix + truncatedSuffix; +} + +/** + * Format a function name with its library/resource name. + * Returns "libraryName!functionName" or just "functionName" if no library is available. 
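+ *
+ * For example (hypothetical data): a function whose resource maps to the
+ * library "xul.dll" would come back as "xul.dll!nsRefreshDriver::Tick",
+ * while a function with no resource is returned unchanged.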
+ */ +export function formatFunctionNameWithLibrary( + funcIndex: number, + thread: Thread, + libs: Lib[] +): string { + const funcName = thread.stringTable.getString( + thread.funcTable.name[funcIndex] + ); + const resourceIndex = thread.funcTable.resource[funcIndex]; + + // If there's no resource or it's -1, just return the function name + if (resourceIndex === -1) { + return funcName; + } + + // Get the resource name + const resourceName = thread.stringTable.getString( + thread.resourceTable.name[resourceIndex] + ); + + // Get the library name if available + const libIndex = thread.resourceTable.lib[resourceIndex]; + if (libIndex !== null && libs) { + const lib = libs[libIndex]; + // Use the library name (e.g., "nvoglv64.dll") rather than full path + const libName = lib.name; + return `${libName}!${funcName}`; + } + + // Fall back to resource name if no library + if (resourceName && resourceName !== funcName) { + return `${resourceName}!${funcName}`; + } + + return funcName; +} + +/** + * Extract function data from a CallTree (function list tree). + * Formats function names with library/resource information when available. + */ +export function extractFunctionData( + tree: { + getRoots(): number[]; + getNodeData(nodeIndex: number): { + total: number; + self: number; + totalRelative: number; + selfRelative: number; + }; + }, + thread: Thread, + libs: Lib[] +): FunctionData[] { + const roots = tree.getRoots(); + return roots.map((nodeIndex) => { + const data = tree.getNodeData(nodeIndex); + // The node index IS the function index for function list trees + const formattedName = formatFunctionNameWithLibrary( + nodeIndex, + thread, + libs + ); + return { + ...data, + funcName: formattedName, + funcIndex: nodeIndex, // Preserve the function index + }; + }); +} + +/** + * Sort functions by total time (descending). + */ +export function sortByTotal(functions: FunctionData[]): FunctionData[] { + return [...functions].sort((a, b) => b.total - a.total); +} + +/** + * Sort functions by self time (descending). + */ +export function sortBySelf(functions: FunctionData[]): FunctionData[] { + return [...functions].sort((a, b) => b.self - a.self); +} + +/** + * Format a single function entry with optional handle. + */ +function formatFunctionEntry( + func: FunctionData, + sortKey: 'total' | 'self', + threadIndexes: Set, + functionMap: FunctionMap +): string { + const totalPct = (func.totalRelative * 100).toFixed(1); + const selfPct = (func.selfRelative * 100).toFixed(1); + const totalCount = Math.round(func.total); + const selfCount = Math.round(func.self); + + // Truncate function name to 120 characters (smart truncation preserves meaning) + const displayName = truncateFunctionName(func.funcName, 120); + + // Generate handle if FunctionMap is provided + const handle = functionMap.handleForFunction(threadIndexes, func.funcIndex); + const handleStr = `${handle}. `; + + if (sortKey === 'total') { + return ` ${handleStr}${displayName} - total: ${totalCount} (${totalPct}%), self: ${selfCount} (${selfPct}%)`; + } + return ` ${handleStr}${displayName} - self: ${selfCount} (${selfPct}%), total: ${totalCount} (${totalPct}%)`; +} + +/** + * Compute statistics for omitted functions. 
+ */ +function computeOmittedStats( + omittedFunctions: FunctionData[] +): FunctionListStats | null { + if (omittedFunctions.length === 0) { + return null; + } + + const maxTotal = Math.max(...omittedFunctions.map((f) => f.total)); + const maxSelf = Math.max(...omittedFunctions.map((f) => f.self)); + const sumSelf = omittedFunctions.reduce((sum, f) => sum + f.self, 0); + + return { + omittedCount: omittedFunctions.length, + maxTotal, + maxSelf, + sumSelf, + }; +} + +/** + * Format a list of functions with a limit, showing statistics for omitted entries. + */ +export function formatFunctionList( + title: string, + functions: FunctionData[], + limit: number, + sortKey: 'total' | 'self', + threadIndexes: Set, + functionMap: FunctionMap +): FormattedFunctionList { + const displayedFunctions = functions.slice(0, limit); + const omittedFunctions = functions.slice(limit); + + const lines = displayedFunctions.map((func) => + formatFunctionEntry(func, sortKey, threadIndexes, functionMap) + ); + + const stats = computeOmittedStats(omittedFunctions); + + if (stats) { + lines.push(''); + lines.push( + ` ... (${stats.omittedCount} more functions omitted, ` + + `max total: ${Math.round(stats.maxTotal)}, ` + + `max self: ${Math.round(stats.maxSelf)}, ` + + `sum of self: ${Math.round(stats.sumSelf)})` + ); + } + + return { + title, + lines, + stats, + }; +} + +/** + * Create both top function lists (by total and by self). + */ +export function createTopFunctionLists( + functions: FunctionData[], + limit: number, + threadIndexes: Set, + functionMap: FunctionMap +): { byTotal: FormattedFunctionList; bySelf: FormattedFunctionList } { + const byTotal = formatFunctionList( + 'Top Functions (by total time)', + sortByTotal(functions), + limit, + 'total', + threadIndexes, + functionMap + ); + + const bySelf = formatFunctionList( + 'Top Functions (by self time)', + sortBySelf(functions), + limit, + 'self', + threadIndexes, + functionMap + ); + + return { byTotal, bySelf }; +} diff --git a/src/profile-query/function-map.ts b/src/profile-query/function-map.ts new file mode 100644 index 0000000000..ef002c04a0 --- /dev/null +++ b/src/profile-query/function-map.ts @@ -0,0 +1,69 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { getThreadsKey } from 'firefox-profiler/profile-logic/profile-data'; +import type { + ThreadIndex, + IndexIntoFuncTable, + ThreadsKey, +} from 'firefox-profiler/types'; + +/** + * Represents a function identified by its thread and function index. + */ +export type FunctionId = { + threadIndexes: Set; + threadsKey: ThreadsKey; + funcIndex: IndexIntoFuncTable; +}; + +/** + * Maps function handles (like "f-1", "f-2") to (threadIndex, funcIndex) pairs. + * This provides a user-friendly way to reference functions in the CLI. + * + * Since each thread has its own funcTable, we need to store both the thread + * index and the function index to uniquely identify a function. + */ +export class FunctionMap { + _handleToFunction: Map = new Map(); + _nextHandleId: number = 1; + + /** + * Get or create a handle for a function. + * Returns the same handle if called multiple times with the same function. 
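+   *
+   * For example (hypothetical indices):
+   *   handleForFunction(new Set([0]), 42); // => 'f-1' on first use
+   *   handleForFunction(new Set([0]), 42); // => 'f-1' again, same function
+   *   handleForFunction(new Set([0]), 7);  // => 'f-2'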
+ */ + handleForFunction( + threadIndexes: Set, + funcIndex: IndexIntoFuncTable + ): string { + // Check if we already have a handle for this function + const threadsKey = getThreadsKey(threadIndexes); + for (const [handle, funcId] of this._handleToFunction.entries()) { + if (funcId.threadsKey === threadsKey && funcId.funcIndex === funcIndex) { + return handle; + } + } + + // Create a new handle + const handle = 'f-' + this._nextHandleId++; + this._handleToFunction.set(handle, { + threadIndexes, + threadsKey, + funcIndex, + }); + return handle; + } + + /** + * Look up a function by its handle. + * Throws an error if the handle is unknown. + */ + functionForHandle(functionHandle: string): FunctionId { + const funcId = this._handleToFunction.get(functionHandle); + if (funcId === undefined) { + throw new Error(`Unknown function ${functionHandle}`); + } + return funcId; + } +} diff --git a/src/profile-query/index.ts b/src/profile-query/index.ts new file mode 100644 index 0000000000..b680cbf15c --- /dev/null +++ b/src/profile-query/index.ts @@ -0,0 +1,679 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * This implements a library for querying the contents of a profile. + * + * To use it it first needs to be built: + * yarn build-profile-query + * + * Then it can be used from an interactive node session: + * + * % node + * > const { ProfileQuerier } = (await import('./dist/profile-query.js')).default; + * undefined + * > const p1 = await ProfileQuerier.load("/Users/mstange/Downloads/merged-profile.json.gz"); + * > const p2 = await ProfileQuerier.load("https://profiler.firefox.com/from-url/http%3A%2F%2Fexample.com%2Fprofile.json/"); + * > const p3 = await ProfileQuerier.load("https://share.firefox.dev/4oLEjCw"); + */ + +import { + getProfile, + getProfileRootRange, +} from 'firefox-profiler/selectors/profile'; +import { + getAllCommittedRanges, + getSelectedThreadIndexes, +} from 'firefox-profiler/selectors/url-state'; +import { + commitRange, + popCommittedRanges, + changeSelectedThreads, +} from '../actions/profile-view'; +import { getThreadSelectors } from 'firefox-profiler/selectors/per-thread'; +import { TimestampManager } from './timestamps'; +import { ThreadMap } from './thread-map'; +import { FunctionMap } from './function-map'; +import { MarkerMap } from './marker-map'; +import { loadProfileFromFileOrUrl } from './loader'; +import { collectProfileInfo } from './formatters/profile-info'; +import { + collectThreadInfo, + collectThreadSamples, + collectThreadSamplesTopDown, + collectThreadSamplesBottomUp, + collectThreadFunctions, +} from './formatters/thread-info'; +import { + collectThreadMarkers, + collectMarkerStack, + collectMarkerInfo, +} from './formatters/marker-info'; +import { parseTimeValue } from './time-range-parser'; +import type { + StatusResult, + SessionContext, + WithContext, + FunctionExpandResult, + FunctionInfoResult, + ViewRangeResult, + ThreadInfoResult, + MarkerStackResult, + MarkerInfoResult, + ProfileInfoResult, + ThreadSamplesResult, + ThreadSamplesTopDownResult, + ThreadSamplesBottomUpResult, + ThreadMarkersResult, + ThreadFunctionsResult, + MarkerFilterOptions, + FunctionFilterOptions, +} from './types'; +import type { CallTreeCollectionOptions } from './formatters/call-tree'; + +import type { StartEndRange } from 'firefox-profiler/types'; +import type { Store } from '../types/store'; + +export 
class ProfileQuerier { + _store: Store; + _processIndexMap: Map; + _timestampManager: TimestampManager; + _threadMap: ThreadMap; + _functionMap: FunctionMap; + _markerMap: MarkerMap; + + constructor(store: Store, rootRange: StartEndRange) { + this._store = store; + this._processIndexMap = new Map(); + this._timestampManager = new TimestampManager(rootRange); + this._threadMap = new ThreadMap(); + this._functionMap = new FunctionMap(); + this._markerMap = new MarkerMap(); + + // Build process index map + const state = this._store.getState(); + const profile = getProfile(state); + const uniquePids = Array.from(new Set(profile.threads.map((t) => t.pid))); + uniquePids.forEach((pid, index) => { + this._processIndexMap.set(pid, index); + }); + } + + static async load(filePathOrUrl: string): Promise { + const { store, rootRange } = await loadProfileFromFileOrUrl(filePathOrUrl); + return new ProfileQuerier(store, rootRange); + } + + async profileInfo(): Promise> { + const result = await collectProfileInfo( + this._store, + this._timestampManager, + this._threadMap, + this._processIndexMap + ); + return { ...result, context: this._getContext() }; + } + + async threadInfo( + threadHandle?: string + ): Promise> { + const result = await collectThreadInfo( + this._store, + this._timestampManager, + this._threadMap, + threadHandle + ); + return { ...result, context: this._getContext() }; + } + + async threadSamples( + threadHandle?: string + ): Promise> { + const result = await collectThreadSamples( + this._store, + this._threadMap, + this._functionMap, + threadHandle + ); + return { ...result, context: this._getContext() }; + } + + async threadSamplesTopDown( + threadHandle?: string, + callTreeOptions?: CallTreeCollectionOptions + ): Promise> { + const result = await collectThreadSamplesTopDown( + this._store, + this._threadMap, + this._functionMap, + threadHandle, + callTreeOptions + ); + return { ...result, context: this._getContext() }; + } + + async threadSamplesBottomUp( + threadHandle?: string, + callTreeOptions?: CallTreeCollectionOptions + ): Promise> { + const result = await collectThreadSamplesBottomUp( + this._store, + this._threadMap, + this._functionMap, + threadHandle, + callTreeOptions + ); + return { ...result, context: this._getContext() }; + } + + /** + * Push a view range selection (commit a range). + * Supports multiple formats: + * - Marker handle: "m-1" (uses marker's start/end times) + * - Timestamp names: "ts-6,ts-7" + * - Seconds: "2.7,3.1" (default if no suffix) + * - Milliseconds: "2700ms,3100ms" + * - Percentage: "10%,20%" + */ + async pushViewRange(rangeName: string): Promise { + const state = this._store.getState(); + const rootRange = getProfileRootRange(state); + const zeroAt = rootRange.start; + + let startTimestamp: number; + let endTimestamp: number; + let markerInfo: ViewRangeResult['markerInfo'] = undefined; + + // Check if it's a marker handle (e.g., "m-1") + if (rangeName.startsWith('m-') && !rangeName.includes(',')) { + // Look up the marker + const { threadIndexes, markerIndex } = + this._markerMap.markerForHandle(rangeName); + const threadSelectors = getThreadSelectors(threadIndexes); + const fullMarkerList = threadSelectors.getFullMarkerList(state); + const marker = fullMarkerList[markerIndex]; + + if (!marker) { + throw new Error(`Marker ${rangeName} not found`); + } + + // Check if marker is an interval marker (has end time) + if (marker.end === null) { + throw new Error( + `Marker ${rangeName} is an instant marker (no duration). 
Only interval markers can be used for zoom ranges.` + ); + } + + startTimestamp = marker.start; + endTimestamp = marker.end; + + // Store marker info for enhanced output + const threadHandle = + this._threadMap.handleForThreadIndexes(threadIndexes); + const friendlyThreadName = threadSelectors.getFriendlyThreadName(state); + markerInfo = { + markerHandle: rangeName, + markerName: marker.name, + threadHandle, + threadName: friendlyThreadName, + }; + } else { + // Split at comma for traditional range format + const parts = rangeName.split(',').map((s) => s.trim()); + if (parts.length !== 2) { + throw new Error( + `Invalid range format: "${rangeName}". Expected a marker handle (e.g., "m-1") or two comma-separated values (e.g., "2.7,3.1" or "ts-6,ts-7")` + ); + } + + // Parse start and end values (supports multiple formats) + const parsedStart = parseTimeValue(parts[0], rootRange); + const parsedEnd = parseTimeValue(parts[1], rootRange); + + // If parseTimeValue returns null, it's a timestamp name - look it up + startTimestamp = + parsedStart ?? + (() => { + const ts = this._timestampManager.timestampForName(parts[0]); + if (ts === null) { + throw new Error(`Unknown timestamp name: "${parts[0]}"`); + } + return ts; + })(); + + endTimestamp = + parsedEnd ?? + (() => { + const ts = this._timestampManager.timestampForName(parts[1]); + if (ts === null) { + throw new Error(`Unknown timestamp name: "${parts[1]}"`); + } + return ts; + })(); + } + + // Get or create timestamp names for display + const startName = this._timestampManager.nameForTimestamp(startTimestamp); + const endName = this._timestampManager.nameForTimestamp(endTimestamp); + + // Convert absolute timestamps to relative timestamps. + // commitRange expects timestamps relative to the profile start (zeroAt), + // but we have absolute timestamps. The getCommittedRange selector will + // add zeroAt back to them. + const relativeStart = startTimestamp - zeroAt; + const relativeEnd = endTimestamp - zeroAt; + + // Dispatch the commitRange action with relative timestamps + this._store.dispatch(commitRange(relativeStart, relativeEnd)); + + // Get the zoom depth after pushing + const newState = this._store.getState(); + const committedRanges = getAllCommittedRanges(newState); + const zoomDepth = committedRanges.length; + + // Calculate duration + const duration = endTimestamp - startTimestamp; + + const message = `Pushed view range: ${startName} (${this._timestampManager.timestampString(startTimestamp)}) to ${endName} (${this._timestampManager.timestampString(endTimestamp)})`; + + return { + type: 'view-range', + action: 'push', + range: { + start: startTimestamp, + startName, + end: endTimestamp, + endName, + }, + message, + duration, + zoomDepth, + markerInfo, + }; + } + + /** + * Pop the most recent view range selection. 
+ */ + async popViewRange(): Promise { + const state = this._store.getState(); + const committedRanges = getAllCommittedRanges(state); + + if (committedRanges.length === 0) { + throw new Error('No view ranges to pop'); + } + + // Pop the last committed range (index = length - 1) + const poppedIndex = committedRanges.length - 1; + this._store.dispatch(popCommittedRanges(poppedIndex)); + + const poppedRange = committedRanges[poppedIndex]; + + // Convert relative timestamps back to absolute timestamps + // committedRanges stores timestamps relative to the profile start (zeroAt) + const rootRange = getProfileRootRange(state); + const zeroAt = rootRange.start; + const absoluteStart = poppedRange.start + zeroAt; + const absoluteEnd = poppedRange.end + zeroAt; + + const startName = this._timestampManager.nameForTimestamp(absoluteStart); + const endName = this._timestampManager.nameForTimestamp(absoluteEnd); + + const message = `Popped view range: ${startName} (${this._timestampManager.timestampString(absoluteStart)}) to ${endName} (${this._timestampManager.timestampString(absoluteEnd)})`; + + return { + type: 'view-range', + action: 'pop', + range: { + start: absoluteStart, + startName, + end: absoluteEnd, + endName, + }, + message, + }; + } + + /** + * Clear all view range selections (return to root view). + */ + async clearViewRange(): Promise { + const state = this._store.getState(); + const committedRanges = getAllCommittedRanges(state); + + if (committedRanges.length === 0) { + throw new Error('No view ranges to clear'); + } + + // Pop all committed ranges (index 0 pops from the first one) + this._store.dispatch(popCommittedRanges(0)); + + const rootRange = getProfileRootRange(state); + const startName = this._timestampManager.nameForTimestamp(rootRange.start); + const endName = this._timestampManager.nameForTimestamp(rootRange.end); + + const message = `Cleared all view ranges, returned to full profile: ${startName} (${this._timestampManager.timestampString(rootRange.start)}) to ${endName} (${this._timestampManager.timestampString(rootRange.end)})`; + + return { + type: 'view-range', + action: 'pop', + range: { + start: rootRange.start, + startName, + end: rootRange.end, + endName, + }, + message, + }; + } + + /** + * Select one or more threads by handle (e.g., "t-0" or "t-0,t-1,t-2"). + */ + async threadSelect(threadHandle: string): Promise { + const threadIndexes = this._threadMap.threadIndexesForHandle(threadHandle); + + // Change the selected threads in the Redux store + this._store.dispatch(changeSelectedThreads(threadIndexes)); + + const state = this._store.getState(); + const profile = getProfile(state); + + if (threadIndexes.size === 1) { + const threadIndex = Array.from(threadIndexes)[0]; + const thread = profile.threads[threadIndex]; + return `Selected thread: ${threadHandle} (${thread.name})`; + } + + const names = Array.from(threadIndexes) + .map((idx) => profile.threads[idx].name) + .join(', '); + return `Selected ${threadIndexes.size} threads: ${threadHandle} (${names})`; + } + + /** + * Get current session context for display in command outputs. + * This is a lightweight version of getStatus() that includes only + * the current view range (not the full stack). 
+ */ + private _getContext(): SessionContext { + const state = this._store.getState(); + const profile = getProfile(state); + const rootRange = getProfileRootRange(state); + const committedRanges = getAllCommittedRanges(state); + const selectedThreadIndexes = getSelectedThreadIndexes(state); + + // Get selected threads info + const selectedThreadHandle = + selectedThreadIndexes.size > 0 + ? this._threadMap.handleForThreadIndexes(selectedThreadIndexes) + : null; + + const selectedThreads = Array.from(selectedThreadIndexes).map( + (threadIndex) => ({ + threadIndex, + name: profile.threads[threadIndex].name, + }) + ); + + // Get current (most recent) view range if any + const zeroAt = rootRange.start; + let currentViewRange = null; + if (committedRanges.length > 0) { + const range = committedRanges[committedRanges.length - 1]; + const absoluteStart = range.start + zeroAt; + const absoluteEnd = range.end + zeroAt; + const startName = this._timestampManager.nameForTimestamp(absoluteStart); + const endName = this._timestampManager.nameForTimestamp(absoluteEnd); + currentViewRange = { + start: absoluteStart, + startName, + end: absoluteEnd, + endName, + }; + } + + return { + selectedThreadHandle, + selectedThreads, + currentViewRange, + rootRange: { + start: rootRange.start, + end: rootRange.end, + }, + }; + } + + /** + * Get current session status including selected threads and view ranges. + */ + async getStatus(): Promise { + const state = this._store.getState(); + const profile = getProfile(state); + const rootRange = getProfileRootRange(state); + const committedRanges = getAllCommittedRanges(state); + const selectedThreadIndexes = getSelectedThreadIndexes(state); + + // Get selected threads info + const selectedThreadHandle = + selectedThreadIndexes.size > 0 + ? this._threadMap.handleForThreadIndexes(selectedThreadIndexes) + : null; + + const selectedThreads = Array.from(selectedThreadIndexes).map( + (threadIndex) => ({ + threadIndex, + name: profile.threads[threadIndex].name, + }) + ); + + // Collect view ranges + const zeroAt = rootRange.start; + const viewRanges = committedRanges.map((range) => { + const absoluteStart = range.start + zeroAt; + const absoluteEnd = range.end + zeroAt; + const startName = this._timestampManager.nameForTimestamp(absoluteStart); + const endName = this._timestampManager.nameForTimestamp(absoluteEnd); + return { + start: absoluteStart, + startName, + end: absoluteEnd, + endName, + }; + }); + + return { + type: 'status', + selectedThreadHandle, + selectedThreads, + viewRanges, + rootRange: { + start: rootRange.start, + end: rootRange.end, + }, + }; + } + + /** + * Expand a function handle to show the full untruncated name. 
+ */ + async functionExpand( + functionHandle: string + ): Promise> { + const state = this._store.getState(); + const profile = getProfile(state); + + // Look up the function + const { threadIndexes, funcIndex } = + this._functionMap.functionForHandle(functionHandle); + + const threadSelectors = getThreadSelectors(threadIndexes); + const thread = threadSelectors.getFilteredThread(state); + const funcName = thread.stringTable.getString( + thread.funcTable.name[funcIndex] + ); + const resourceIndex = thread.funcTable.resource[funcIndex]; + + // Get library prefix if available + let library: string | undefined; + if (resourceIndex !== -1 && thread.resourceTable) { + const libIndex = thread.resourceTable.lib[resourceIndex]; + if (libIndex !== null && libIndex !== undefined && profile.libs) { + const lib = profile.libs[libIndex]; + library = lib.name; + } + } + + const fullName = library ? `${library}!${funcName}` : funcName; + const threadHandle = this._threadMap.handleForThreadIndexes(threadIndexes); + + return { + type: 'function-expand', + functionHandle, + funcIndex, + threadHandle, + name: funcName, + fullName, + library, + context: this._getContext(), + }; + } + + /** + * Show detailed information about a function. + */ + async functionInfo( + functionHandle: string + ): Promise> { + const state = this._store.getState(); + const profile = getProfile(state); + + // Look up the function + const { threadIndexes, funcIndex } = + this._functionMap.functionForHandle(functionHandle); + + const threadSelectors = getThreadSelectors(threadIndexes); + const thread = threadSelectors.getFilteredThread(state); + const threadHandle = this._threadMap.handleForThreadIndexes(threadIndexes); + + const funcName = thread.stringTable.getString( + thread.funcTable.name[funcIndex] + ); + const resourceIndex = thread.funcTable.resource[funcIndex]; + const isJS = thread.funcTable.isJS[funcIndex]; + const relevantForJS = thread.funcTable.relevantForJS[funcIndex]; + + let resource: FunctionInfoResult['resource']; + let library: FunctionInfoResult['library']; + let libraryName: string | undefined; + + // Add resource info if available + if (resourceIndex !== -1 && thread.resourceTable) { + const resourceName = thread.stringTable.getString( + thread.resourceTable.name[resourceIndex] + ); + resource = { + name: resourceName, + index: resourceIndex, + }; + + const libIndex = thread.resourceTable.lib[resourceIndex]; + if ( + libIndex !== null && + libIndex !== undefined && + libIndex >= 0 && + profile.libs + ) { + const lib = profile.libs[libIndex]; + libraryName = lib.name; + library = { + name: lib.name, + path: lib.path, + debugName: lib.debugName, + debugPath: lib.debugPath, + breakpadId: lib.breakpadId, + }; + } + } + + const fullName = libraryName ? `${libraryName}!${funcName}` : funcName; + + return { + type: 'function-info', + functionHandle, + funcIndex, + threadHandle, + threadName: thread.name, + name: funcName, + fullName, + isJS, + relevantForJS, + resource, + library, + context: this._getContext(), + }; + } + + /** + * List markers for a thread with aggregated statistics. + */ + async threadMarkers( + threadHandle?: string, + filterOptions?: MarkerFilterOptions + ): Promise> { + const result = await collectThreadMarkers( + this._store, + this._threadMap, + this._markerMap, + threadHandle, + filterOptions + ); + return { ...result, context: this._getContext() }; + } + + /** + * List all functions for a thread with their CPU percentages. + * Supports filtering by search string, minimum self time, and limit. 
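+   *
+   * For example, on a ProfileQuerier instance (here called `querier`,
+   * hypothetical handle and filters), list up to 20 functions on "t-0"
+   * whose names contain "paint" and that have at least 1% self time:
+   *   await querier.threadFunctions('t-0', {
+   *     searchString: 'paint',
+   *     minSelf: 1,
+   *     limit: 20,
+   *   });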
+ */ + async threadFunctions( + threadHandle?: string, + filterOptions?: FunctionFilterOptions + ): Promise> { + const result = await collectThreadFunctions( + this._store, + this._threadMap, + this._functionMap, + threadHandle, + filterOptions + ); + return { ...result, context: this._getContext() }; + } + + /** + * Show detailed information about a specific marker. + */ + async markerInfo( + markerHandle: string + ): Promise> { + const result = await collectMarkerInfo( + this._store, + this._markerMap, + this._threadMap, + markerHandle + ); + return { ...result, context: this._getContext() }; + } + + async markerStack( + markerHandle: string + ): Promise> { + const result = await collectMarkerStack( + this._store, + this._markerMap, + this._threadMap, + markerHandle + ); + return { ...result, context: this._getContext() }; + } +} diff --git a/src/profile-query/loader.ts b/src/profile-query/loader.ts new file mode 100644 index 0000000000..d3b0cdbc69 --- /dev/null +++ b/src/profile-query/loader.ts @@ -0,0 +1,131 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import * as fs from 'fs'; + +import createStore from '../app-logic/create-store'; +import { unserializeProfileOfArbitraryFormat } from '../profile-logic/process-profile'; +import { finalizeProfileView, loadProfile } from '../actions/receive-profile'; +import { getProfileRootRange } from 'firefox-profiler/selectors/profile'; +import { + extractProfileUrlFromProfilerUrl, + fetchProfile, +} from '../utils/profile-fetch'; +import type { TemporaryError } from '../utils/errors'; +import type { Store } from '../types/store'; +import type { StartEndRange } from 'firefox-profiler/types'; + +/** + * Helper function to detect if the input is a URL + */ +function isUrl(input: string): boolean { + return input.startsWith('http://') || input.startsWith('https://'); +} + +/** + * Helper function to follow redirects and get the final URL. + * This is useful for short URLs like https://share.firefox.dev/4oLEjCw + */ +async function followRedirects(url: string): Promise { + const response = await fetch(url, { + method: 'HEAD', + redirect: 'follow', + }); + return response.url; +} + +export interface LoadResult { + store: Store; + rootRange: StartEndRange; +} + +/** + * Load a profile from a file path or URL. + * Returns a store and root range that can be used to construct a ProfileQuerier. 
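+ *
+ * For example (hypothetical inputs):
+ *   await loadProfileFromFileOrUrl('/tmp/profile.json.gz');          // local file
+ *   await loadProfileFromFileOrUrl('https://share.firefox.dev/abc'); // short link; redirects are followed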
+ */ +export async function loadProfileFromFileOrUrl( + filePathOrUrl: string +): Promise { + const store = createStore(); + console.log(`Loading profile from ${filePathOrUrl}`); + + if (isUrl(filePathOrUrl)) { + // Handle URL input + let finalUrl = filePathOrUrl; + + // If it's a profiler.firefox.com URL (or short URL that redirects to one), + // extract the actual profile URL from it + if ( + filePathOrUrl.includes('profiler.firefox.com') || + filePathOrUrl.includes('share.firefox.dev') + ) { + // Follow redirects for short URLs + if (filePathOrUrl.includes('share.firefox.dev')) { + console.log('Following redirect from short URL...'); + finalUrl = await followRedirects(filePathOrUrl); + console.log(`Redirected to: ${finalUrl}`); + } + + // Extract the profile URL from the profiler.firefox.com URL + const profileUrl = extractProfileUrlFromProfilerUrl(finalUrl); + if (profileUrl) { + console.log(`Extracted profile URL: ${profileUrl}`); + finalUrl = profileUrl; + } else { + throw new Error( + `Unable to extract profile URL from profiler URL: ${finalUrl}` + ); + } + } + + // Fetch the profile using shared utility + console.log(`Fetching profile from ${finalUrl}`); + const result = await fetchProfile({ + url: finalUrl, + onTemporaryError: (e: TemporaryError) => { + if (e.attempt) { + console.log(`Retry ${e.attempt.count}/${e.attempt.total}...`); + } + }, + }); + + // Check if this is a zip file - not yet supported in CLI + if (result.responseType === 'ZIP') { + throw new Error( + 'Zip files are not yet supported in the CLI. ' + + 'Please extract the profile from the zip file first, or use the web interface at profiler.firefox.com' + ); + } + + // Extract the profile data + const profile = await unserializeProfileOfArbitraryFormat(result.profile); + if (profile === undefined) { + throw new Error('Unable to parse the profile.'); + } + + await store.dispatch(loadProfile(profile, {}, true)); + await store.dispatch(finalizeProfileView()); + const state = store.getState(); + const rootRange = getProfileRootRange(state); + return { store, rootRange }; + } + + // Handle file path input + // Read the raw bytes from the file. It might be a JSON file, but it could also + // be a binary file, e.g. a .json.gz file, or any of the binary formats supported + // by our importers. + const bytes = fs.readFileSync(filePathOrUrl, null); + + // Load the profile. + const profile = await unserializeProfileOfArbitraryFormat(bytes); + if (profile === undefined) { + throw new Error('Unable to parse the profile.'); + } + + await store.dispatch(loadProfile(profile, {}, true)); + await store.dispatch(finalizeProfileView()); + const state = store.getState(); + const rootRange = getProfileRootRange(state); + return { store, rootRange }; +} diff --git a/src/profile-query/marker-map.ts b/src/profile-query/marker-map.ts new file mode 100644 index 0000000000..ef464a860c --- /dev/null +++ b/src/profile-query/marker-map.ts @@ -0,0 +1,72 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { getThreadsKey } from 'firefox-profiler/profile-logic/profile-data'; +import type { + ThreadIndex, + MarkerIndex, + ThreadsKey, +} from 'firefox-profiler/types'; + +/** + * Represents a marker identified by its thread and marker index. 
+ */ +export type MarkerId = { + threadIndexes: Set; + threadsKey: ThreadsKey; + markerIndex: MarkerIndex; +}; + +/** + * Maps marker handles (like "m-1", "m-2") to (threadIndex, markerIndex) pairs. + * This provides a user-friendly way to reference markers in the CLI. + * + * Since each thread has its own marker list, we need to store both the thread + * index and the marker index to uniquely identify a marker. + */ +export class MarkerMap { + _handleToMarker: Map = new Map(); + _nextHandleId: number = 1; + + /** + * Get or create a handle for a marker. + * Returns the same handle if called multiple times with the same marker. + */ + handleForMarker( + threadIndexes: Set, + markerIndex: MarkerIndex + ): string { + // Check if we already have a handle for this marker + const threadsKey = getThreadsKey(threadIndexes); + for (const [handle, markerId] of this._handleToMarker.entries()) { + if ( + markerId.threadsKey === threadsKey && + markerId.markerIndex === markerIndex + ) { + return handle; + } + } + + // Create a new handle + const handle = 'm-' + this._nextHandleId++; + this._handleToMarker.set(handle, { + threadIndexes, + threadsKey, + markerIndex, + }); + return handle; + } + + /** + * Look up a marker by its handle. + * Throws an error if the handle is unknown. + */ + markerForHandle(markerHandle: string): MarkerId { + const markerId = this._handleToMarker.get(markerHandle); + if (markerId === undefined) { + throw new Error(`Unknown marker ${markerHandle}`); + } + return markerId; + } +} diff --git a/src/profile-query/process-thread-list.ts b/src/profile-query/process-thread-list.ts new file mode 100644 index 0000000000..ca8fd9a6d3 --- /dev/null +++ b/src/profile-query/process-thread-list.ts @@ -0,0 +1,178 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +export type ThreadInfo = { + threadIndex: number; + name: string; + cpuMs: number; + pid: string; +}; + +export type ProcessInfo = { + pid: string; + processIndex: number; + name: string; + cpuMs: number; + threads: Array<{ threadIndex: number; name: string; cpuMs: number }>; +}; + +export type ProcessListItem = { + processIndex: number; + pid: string; + name: string; + cpuMs: number; + threads: Array<{ threadIndex: number; name: string; cpuMs: number }>; + remainingThreads?: { + count: number; + combinedCpuMs: number; + maxCpuMs: number; + }; + startTime?: number; + endTime?: number | null; +}; + +export type ProcessThreadListResult = { + processes: ProcessListItem[]; + remainingProcesses?: { + count: number; + combinedCpuMs: number; + maxCpuMs: number; + }; +}; + +/** + * Build a hierarchical list of processes and threads for display. 
+ * + * Shows: + * - Top 5 processes by CPU time + * - Any additional processes that contain threads from the top 20 threads overall + * - For each process, shows its top threads: + * - If the process has threads in the top 20 overall, show ALL of those threads + * - Otherwise, show up to 5 threads + * - Summary of remaining threads if any + * - Summary of remaining processes if any + */ +export function buildProcessThreadList( + threads: ThreadInfo[], + processIndexMap: Map +): ProcessThreadListResult { + // Aggregate threads by process + const processCPUMap = new Map(); + + threads.forEach((thread) => { + const { pid, threadIndex, name, cpuMs } = thread; + const existing = processCPUMap.get(pid); + + if (existing) { + existing.cpuMs += cpuMs; + existing.threads.push({ threadIndex, name, cpuMs }); + } else { + const processIndex = processIndexMap.get(pid); + if (processIndex === undefined) { + throw new Error(`Process index not found for pid ${pid}`); + } + // Infer process name from first thread's process info + // In real usage, this would come from the thread's processName field + processCPUMap.set(pid, { + pid, + processIndex, + name: pid, // Will be overridden by caller + cpuMs, + threads: [{ threadIndex, name, cpuMs }], + }); + } + }); + + // Sort threads within each process by CPU + processCPUMap.forEach((processInfo) => { + processInfo.threads.sort((a, b) => b.cpuMs - a.cpuMs); + }); + + // Get all processes sorted by CPU + const allProcesses = Array.from(processCPUMap.values()); + allProcesses.sort((a, b) => b.cpuMs - a.cpuMs); + + // Get top 5 processes by CPU + const top5ProcessPids = new Set(allProcesses.slice(0, 5).map((p) => p.pid)); + + // Get top 20 threads overall + const allThreadsSorted = [...threads].sort((a, b) => b.cpuMs - a.cpuMs); + const top20Threads = allThreadsSorted.slice(0, 20); + const top20ThreadPids = new Set(top20Threads.map((t) => t.pid)); + + // Build a set of threadIndexes that are in the top 20 + const top20ThreadIndexes = new Set(top20Threads.map((t) => t.threadIndex)); + + // Determine which processes to show + const processesToShow = allProcesses.filter( + (p) => top5ProcessPids.has(p.pid) || top20ThreadPids.has(p.pid) + ); + + // Build the result list + const result: ProcessListItem[] = processesToShow.map((processInfo) => { + const { pid, processIndex, name, cpuMs, threads: allThreads } = processInfo; + + // Separate threads into top-20 and others + const top20ThreadsInProcess = allThreads.filter((t) => + top20ThreadIndexes.has(t.threadIndex) + ); + const otherThreads = allThreads.filter( + (t) => !top20ThreadIndexes.has(t.threadIndex) + ); + + // Show all top-20 threads, plus fill up to 5 with other threads if needed + const threadsToShow = [...top20ThreadsInProcess]; + const remainingSlots = Math.max(0, 5 - threadsToShow.length); + threadsToShow.push(...otherThreads.slice(0, remainingSlots)); + + // Calculate remaining threads summary + const remainingThreads = otherThreads.slice(remainingSlots); + let remainingThreadsInfo: ProcessListItem['remainingThreads'] = undefined; + + if (remainingThreads.length > 0) { + const combinedCpuMs = remainingThreads.reduce( + (sum, t) => sum + t.cpuMs, + 0 + ); + const maxCpuMs = Math.max(...remainingThreads.map((t) => t.cpuMs)); + remainingThreadsInfo = { + count: remainingThreads.length, + combinedCpuMs, + maxCpuMs, + }; + } + + return { + processIndex, + pid, + name, + cpuMs, + threads: threadsToShow, + remainingThreads: remainingThreadsInfo, + }; + }); + + // Calculate remaining processes summary + const 
remainingProcesses = allProcesses.slice(processesToShow.length);
+  let remainingProcessesInfo: ProcessThreadListResult['remainingProcesses'] =
+    undefined;
+
+  if (remainingProcesses.length > 0) {
+    const combinedCpuMs = remainingProcesses.reduce(
+      (sum, p) => sum + p.cpuMs,
+      0
+    );
+    const maxCpuMs = Math.max(...remainingProcesses.map((p) => p.cpuMs));
+    remainingProcessesInfo = {
+      count: remainingProcesses.length,
+      combinedCpuMs,
+      maxCpuMs,
+    };
+  }
+
+  return {
+    processes: result,
+    remainingProcesses: remainingProcessesInfo,
+  };
+}
diff --git a/src/profile-query/thread-map.ts b/src/profile-query/thread-map.ts
new file mode 100644
index 0000000000..09efc233b5
--- /dev/null
+++ b/src/profile-query/thread-map.ts
@@ -0,0 +1,47 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import type { ThreadIndex } from 'firefox-profiler/types';
+
+/**
+ * Maps thread handles (like "t-0", "t-1") to thread indices.
+ * This provides a user-friendly way to reference threads in the CLI.
+ * Supports multi-thread handles like "t-4,t-2,t-6" for selecting multiple threads.
+ */
+export class ThreadMap {
+  _map: Map<string, ThreadIndex> = new Map();
+
+  handleForThreadIndex(threadIndex: ThreadIndex): string {
+    const handle = 't-' + threadIndex;
+    if (!this._map.has(handle)) {
+      this._map.set(handle, threadIndex);
+    }
+    return handle;
+  }
+
+  threadIndexForHandle(threadHandle: string): ThreadIndex {
+    const threadIndex = this._map.get(threadHandle);
+    if (threadIndex === undefined) {
+      throw new Error(`Unknown thread ${threadHandle}`);
+    }
+    return threadIndex;
+  }
+
+  threadIndexesForHandle(threadHandle: string): Set<ThreadIndex> {
+    const handles = threadHandle.split(',').map((s) => s.trim());
+    const indices = handles.map((handle) => {
+      const idx = this._map.get(handle);
+      if (idx === undefined) {
+        throw new Error(`Unknown thread ${handle}`);
+      }
+      return idx;
+    });
+    return new Set(indices);
+  }
+
+  handleForThreadIndexes(threadIndexes: Set<ThreadIndex>): string {
+    const sorted = Array.from(threadIndexes).sort((a, b) => a - b);
+    return sorted.map((idx) => this.handleForThreadIndex(idx)).join(',');
+  }
+}
diff --git a/src/profile-query/time-range-parser.ts b/src/profile-query/time-range-parser.ts
new file mode 100644
index 0000000000..3266d1762d
--- /dev/null
+++ b/src/profile-query/time-range-parser.ts
@@ -0,0 +1,63 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+import type { StartEndRange } from 'firefox-profiler/types';
+
+/**
+ * Parse a time value from the push-range command.
+ * Supports multiple formats:
+ * - Timestamp names: "ts-6" (returns null, caller should look up in timestamp manager)
+ * - Seconds: "2.7" or "2.7s" (relative to profile start)
+ * - Milliseconds: "2700ms" (relative to profile start)
+ * - Percentage: "10%" (percentage through profile duration)
+ *
+ * Returns absolute timestamp in milliseconds, or null if it's a timestamp name.
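+ *
+ * For example (hypothetical values), with rootRange = { start: 1000, end: 3000 }:
+ *   parseTimeValue('50%', rootRange)   → 2000  (start + 50% of the 2000ms duration)
+ *   parseTimeValue('500ms', rootRange) → 1500
+ *   parseTimeValue('1.5s', rootRange)  → 2500
+ *   parseTimeValue('ts-4', rootRange)  → null  (caller resolves the timestamp name)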
+ */ +export function parseTimeValue( + value: string, + rootRange: StartEndRange +): number | null { + // Check if it's a timestamp name (starts with "ts") + if (value.startsWith('ts')) { + // Return null to signal caller should look it up + return null; + } + + // Check if it's a percentage + if (value.endsWith('%')) { + const percent = parseFloat(value.slice(0, -1)); + if (isNaN(percent)) { + throw new Error(`Invalid percentage: "${value}"`); + } + const duration = rootRange.end - rootRange.start; + return rootRange.start + (percent / 100) * duration; + } + + // Check if it's milliseconds + if (value.endsWith('ms')) { + const ms = parseFloat(value.slice(0, -2)); + if (isNaN(ms)) { + throw new Error(`Invalid milliseconds: "${value}"`); + } + return rootRange.start + ms; + } + + // Check if it's seconds with 's' suffix + if (value.endsWith('s')) { + const seconds = parseFloat(value.slice(0, -1)); + if (isNaN(seconds)) { + throw new Error(`Invalid seconds: "${value}"`); + } + return rootRange.start + seconds * 1000; + } + + // Default: treat as seconds (no suffix) + const seconds = parseFloat(value); + if (isNaN(seconds)) { + throw new Error( + `Invalid time value: "${value}". Expected timestamp name (ts-X), seconds (2.7), milliseconds (2700ms), or percentage (10%)` + ); + } + return rootRange.start + seconds * 1000; +} diff --git a/src/profile-query/timestamps.ts b/src/profile-query/timestamps.ts new file mode 100644 index 0000000000..24d4d65dcf --- /dev/null +++ b/src/profile-query/timestamps.ts @@ -0,0 +1,312 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/** + * TimestampManager provides compact, hierarchical names for timestamps to make + * them LLM-friendly and token-efficient. This allows LLMs to reference specific + * time points when using ProfileQuerier (e.g., for range selections). + * + * Naming scheme: + * - In-range timestamps [start, end]: "ts-" prefix (e.g., ts-0, ts-K, ts-gK) + * - Before-start timestamps: "ts<" prefix with exponential buckets (ts<0, ts<1, ...) + * - After-end timestamps: "ts>" prefix with exponential buckets (ts>0, ts>1, ...) + * + * The hierarchical algorithm creates shorter names for timestamps that are + * referenced early, with names growing longer as you drill down between existing + * marks. This keeps token usage low while maintaining precision. + */ + +import type { StartEndRange } from 'firefox-profiler/types'; +import { bisectionRightByKey } from 'firefox-profiler/utils/bisect'; +import { formatTimestamp } from 'firefox-profiler/utils/format-numbers'; + +/** + * Build the character alphabet used for timestamp names. + * Order: 0-9, a-z, A-Z (62 characters total). + */ +function _makeChars(): string[] { + const chars = []; + for (let i = 0; i < 10; i++) { + chars.push('' + i); + } + const aLower = 'a'.charCodeAt(0); + const aUpper = 'A'.charCodeAt(0); + for (let i = 0; i < 26; i++) { + chars.push(String.fromCharCode(aLower + i)); + chars.push(String.fromCharCode(aUpper + i)); + } + + return chars; +} + +function assert(condition: boolean) { + if (!condition) { + throw new Error('assert failed'); + } +} + +/** + * Item represents a node in the hierarchical timestamp tree. Each item + * corresponds to a specific timestamp and has an index in its level's + * character space (0-61). Items lazily create children as timestamps + * between existing marks are requested. 
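+ *
+ * Each level of recursion appends one character from the 62-character
+ * alphabet to the prefix, so a name's length reflects how deeply it had to
+ * drill down between existing marks.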
+ */ +class Item { + index: number; + timestamp: number; + + // Children are created on-demand and ordered by timestamp. + _children: Item[] | null = null; + + constructor(index: number, start: number) { + this.index = index; + this.timestamp = start; + } + + /** + * Get a hierarchical name for a timestamp within this item's range. + * + * Algorithm: + * 1. If timestamp matches an existing mark, return its name + * 2. Find the two adjacent marks that bracket the timestamp + * 3. If marks are adjacent (indices differ by 1), recurse into the left mark + * 4. Otherwise, interpolate to find a new index and insert a new mark + * + * This ensures timestamps requested early get shorter names, with names + * growing longer as you drill down between existing marks. + */ + nameForTimestamp(ts: number, end: number, prefix: string): string { + const start = this.timestamp; + if (ts < start || ts > end) { + throw new Error('out of range'); + } + if (ts === start) { + return prefix; + } + // Lazily initialize with boundary marks at indices 0 and MARKS_PER_LEVEL-1. + if (this._children === null) { + this._children = [new Item(0, start), new Item(MARKS_PER_LEVEL - 1, end)]; + } + // Binary search to find the left mark that brackets this timestamp. + const i = + bisectionRightByKey(this._children, ts, (item) => item.timestamp) - 1; + assert(i >= 0); + assert(i + 1 < this._children.length); + const left = this._children[i]; + const right = this._children[i + 1]; + assert(ts >= left.timestamp); + assert(ts < right.timestamp); + const leftIndex = left.index; + const rightIndex = right.index; + const indexDelta = rightIndex - leftIndex; + const rightTimestamp = right.timestamp; + // If marks are adjacent, recurse into the left mark's subrange. + if (indexDelta === 1) { + return left.nameForTimestamp( + ts, + rightTimestamp, + prefix + CHARS[leftIndex] + ); + } + // Interpolate to find a new index between the two marks. + const leftTimestamp = left.timestamp; + const relativeTimestamp = ts - leftTimestamp; + const timestampDelta = rightTimestamp - leftTimestamp; + const itemIndex = + leftIndex + + 1 + + Math.floor((relativeTimestamp / timestampDelta) * (indexDelta - 1)); + assert(itemIndex > leftIndex); + assert(itemIndex < rightIndex); + // Insert the new mark and return its name. + const item = new Item(itemIndex, ts); + this._children.splice(i + 1, 0, item); + return prefix + CHARS[itemIndex]; + } +} + +// Character alphabet: 0-9, a-z, A-Z (62 characters) +const CHARS = _makeChars(); +const MARKS_PER_LEVEL = CHARS.length; + +/** + * TimestampManager creates compact, hierarchical names for timestamps. + * + * Example names for range [1000, 2000]: + * - 1000 → "ts-0" (range start) + * - 2000 → "ts-Z" (range end) + * - 1500 → "ts-K" (middle of range) + * - 1000.1 → "ts-04" (between ts-0 and ts-1, drills into ts-0's subrange) + * - 500 → "ts<0K" (before range start, in first bucket before-range) + * - 2500 → "ts>0K" (after range end, in first bucket after-range) + * + * Out-of-bounds timestamps use exponentially doubling buckets: + * - ts<0: [start - 1×length, start] + * - ts<1: [start - 2×length, start - 1×length] + * - ts<2: [start - 4×length, start - 2×length] + * - ts buckets extending to the right. + */ +export class TimestampManager { + _rootRangeStart: number; + _rootRangeEnd: number; + _rootRangeLength: number; + _mainTree: Item; + // Trees for exponentially-spaced buckets before/after the main range. + // Keys are bucket numbers (0, 1, 2, ...), created on-demand. 
+ _beforeBuckets: Map = new Map(); + _afterBuckets: Map = new Map(); + // Reverse lookup: timestamp name → actual timestamp value. + // Only contains names that have been returned by nameForTimestamp(). + _nameToTimestamp: Map = new Map(); + + constructor(rootRange: StartEndRange) { + this._rootRangeStart = rootRange.start; + this._rootRangeEnd = rootRange.end; + this._rootRangeLength = rootRange.end - rootRange.start; + this._mainTree = new Item(0, rootRange.start); + } + + /** + * Get a compact name for a timestamp. Names are minted on-demand and + * cached for reverse lookup. + */ + nameForTimestamp(ts: number): string { + // Check cache first for exact matches. + for (const [name, cachedTs] of this._nameToTimestamp.entries()) { + if (cachedTs === ts) { + return name; + } + } + + let name: string; + + // Handle special boundary cases. + if (ts === this._rootRangeStart) { + name = 'ts-0'; + } else if (ts === this._rootRangeEnd) { + name = 'ts-Z'; + } else if (ts < this._rootRangeStart) { + // Before-start: find the appropriate exponential bucket. + const distance = this._rootRangeStart - ts; + const bucketNum = this._getBucketNumber(distance); + const bucket = this._getOrCreateBeforeBucket(bucketNum); + const bucketEnd = this._getBeforeBucketEnd(bucketNum); + name = bucket.nameForTimestamp(ts, bucketEnd, `ts<${bucketNum}`); + } else if (ts > this._rootRangeEnd) { + // After-end: find the appropriate exponential bucket. + const distance = ts - this._rootRangeEnd; + const bucketNum = this._getBucketNumber(distance); + const bucket = this._getOrCreateAfterBucket(bucketNum); + const bucketEnd = this._getAfterBucketEnd(bucketNum); + name = bucket.nameForTimestamp(ts, bucketEnd, `ts>${bucketNum}`); + } else { + // In-range: use main tree. + name = this._mainTree.nameForTimestamp(ts, this._rootRangeEnd, 'ts-'); + } + + // Cache for reverse lookup. + this._nameToTimestamp.set(name, ts); + return name; + } + + /** + * Reverse lookup: get the timestamp for a name that was previously + * returned by nameForTimestamp(). Returns null if the name is unknown. + */ + timestampForName(name: string): number | null { + return this._nameToTimestamp.get(name) ?? null; + } + + /** + * Format a timestamp as a human-readable string relative to range start. + */ + timestampString(ts: number): string { + return formatTimestamp(ts - this._rootRangeStart); + } + + /** + * Calculate which bucket number a timestamp belongs to based on distance + * from the range boundary. Buckets double in size exponentially. + * + * Bucket 0: distance <= 1×length + * Bucket 1: 1×length < distance <= 2×length + * Bucket 2: 2×length < distance <= 4×length + * Bucket n: 2^(n-1)×length < distance <= 2^n×length + */ + _getBucketNumber(distance: number): number { + const ratio = distance / this._rootRangeLength; + if (ratio <= 1) { + return 0; + } + return Math.ceil(Math.log2(ratio)); + } + + /** + * Get the start timestamp for a before-bucket. + * Bucket n covers [start - 2^n×length, start - 2^(n-1)×length]. + */ + _getBeforeBucketStart(bucketNum: number): number { + const distanceFromStart = Math.pow(2, bucketNum) * this._rootRangeLength; + return this._rootRangeStart - distanceFromStart; + } + + /** + * Get the end timestamp for a before-bucket. 
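+   * Bucket 0 ends at the range start; bucket n (n ≥ 1) ends at
+   * start - 2^(n-1)×length. For example, with a (hypothetical) range length
+   * of 1000ms, bucket 1 ends at start - 1000 and bucket 2 at start - 2000.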
+ */ + _getBeforeBucketEnd(bucketNum: number): number { + if (bucketNum === 0) { + return this._rootRangeStart; + } + const distanceFromStart = + Math.pow(2, bucketNum - 1) * this._rootRangeLength; + return this._rootRangeStart - distanceFromStart; + } + + /** + * Get the start timestamp for an after-bucket. + * Bucket n covers [end + 2^(n-1)×length, end + 2^n×length]. + */ + _getAfterBucketStart(bucketNum: number): number { + if (bucketNum === 0) { + return this._rootRangeEnd; + } + const distanceFromEnd = Math.pow(2, bucketNum - 1) * this._rootRangeLength; + return this._rootRangeEnd + distanceFromEnd; + } + + /** + * Get the end timestamp for an after-bucket. + */ + _getAfterBucketEnd(bucketNum: number): number { + const distanceFromEnd = Math.pow(2, bucketNum) * this._rootRangeLength; + return this._rootRangeEnd + distanceFromEnd; + } + + /** + * Get or create an Item tree for a before-bucket. + */ + _getOrCreateBeforeBucket(bucketNum: number): Item { + let bucket = this._beforeBuckets.get(bucketNum); + if (!bucket) { + const bucketStart = this._getBeforeBucketStart(bucketNum); + bucket = new Item(0, bucketStart); + this._beforeBuckets.set(bucketNum, bucket); + } + return bucket; + } + + /** + * Get or create an Item tree for an after-bucket. + */ + _getOrCreateAfterBucket(bucketNum: number): Item { + let bucket = this._afterBuckets.get(bucketNum); + if (!bucket) { + const bucketStart = this._getAfterBucketStart(bucketNum); + bucket = new Item(0, bucketStart); + this._afterBuckets.set(bucketNum, bucket); + } + return bucket; + } +} diff --git a/src/profile-query/types.ts b/src/profile-query/types.ts new file mode 100644 index 0000000000..0b2863ec98 --- /dev/null +++ b/src/profile-query/types.ts @@ -0,0 +1,428 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/** + * Shared types for profile querying. + * These types are used by both profile-query (the library) and profile-query-cli. 
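+ *
+ * Each result type carries a string literal `type` field (e.g. 'status',
+ * 'thread-info') so formatters can discriminate between result shapes.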
+ */ + +// ===== Utility types ===== + +export type TopMarker = { + handle: string; + label: string; + start: number; + duration?: number; + hasStack?: boolean; +}; + +export type FunctionDisplayInfo = { + name: string; + nameWithLibrary: string; + library?: string; +}; + +// ===== Filter Options ===== + +export type MarkerFilterOptions = { + searchString?: string; + minDuration?: number; // Minimum duration in milliseconds + maxDuration?: number; // Maximum duration in milliseconds + category?: string; // Filter by category name + hasStack?: boolean; // Only show markers with stack traces + limit?: number; // Limit the number of markers in aggregation (not output lines) + groupBy?: string; // Grouping strategy (e.g., "type,name" or "type,field:eventType") + autoGroup?: boolean; // Automatically determine grouping based on field variance +}; + +export type FunctionFilterOptions = { + searchString?: string; // Substring search in function names + minSelf?: number; // Minimum self time percentage (0-100) + limit?: number; // Limit the number of functions in output +}; + +// ===== Session Context ===== +// Context information included in all command results for persistent display + +export type SessionContext = { + selectedThreadHandle: string | null; // Combined handle like "t-0" or "t-0,t-1,t-2" + selectedThreads: Array<{ + threadIndex: number; + name: string; + }>; + currentViewRange: { + start: number; + startName: string; + end: number; + endName: string; + } | null; // null if viewing full profile + rootRange: { + start: number; + end: number; + }; +}; + +/** + * Wrapper type that adds session context to any result type. + */ +export type WithContext = T & { context: SessionContext }; + +// ===== Status Command ===== + +export type StatusResult = { + type: 'status'; + selectedThreadHandle: string | null; // Combined handle like "t-0" or "t-0,t-1,t-2" + selectedThreads: Array<{ + threadIndex: number; + name: string; + }>; + viewRanges: Array<{ + start: number; + startName: string; + end: number; + endName: string; + }>; + rootRange: { + start: number; + end: number; + }; +}; + +// ===== Function Commands ===== + +export type FunctionExpandResult = { + type: 'function-expand'; + functionHandle: string; + funcIndex: number; + threadHandle: string; + name: string; + fullName: string; + library?: string; +}; + +export type FunctionInfoResult = { + type: 'function-info'; + functionHandle: string; + funcIndex: number; + threadHandle: string; + threadName: string; + name: string; + fullName: string; + isJS: boolean; + relevantForJS: boolean; + resource?: { + name: string; + index: number; + }; + library?: { + name: string; + path: string; + debugName?: string; + debugPath?: string; + breakpadId?: string; + }; +}; + +// ===== View Range Commands ===== + +export type ViewRangeResult = { + type: 'view-range'; + action: 'push' | 'pop'; + range: { + start: number; + startName: string; + end: number; + endName: string; + }; + message: string; + // Enhanced information for better UX (optional, only present for 'push' action) + duration?: number; // Duration in milliseconds + zoomDepth?: number; // Current zoom stack depth + markerInfo?: { + // Present if zoomed to a marker + markerHandle: string; + markerName: string; + threadHandle: string; + threadName: string; + }; +}; + +// ===== Thread Commands ===== + +export type ThreadInfoResult = { + type: 'thread-info'; + threadHandle: string; + name: string; + friendlyName: string; + createdAt: number; + createdAtName: string; + endedAt: number | null; + 
endedAtName: string | null; + sampleCount: number; + markerCount: number; + cpuActivity: Array<{ + startTime: number; + startTimeName: string; + startTimeStr: string; + endTime: number; + endTimeName: string; + endTimeStr: string; + cpuMs: number; + depthLevel: number; + }> | null; +}; + +export type TopFunctionInfo = FunctionDisplayInfo & { + functionHandle: string; + functionIndex: number; + totalSamples: number; + totalPercentage: number; + selfSamples: number; + selfPercentage: number; +}; + +export type ThreadSamplesResult = { + type: 'thread-samples'; + threadHandle: string; + friendlyThreadName: string; + topFunctionsByTotal: TopFunctionInfo[]; + topFunctionsBySelf: TopFunctionInfo[]; + heaviestStack: { + selfSamples: number; + frameCount: number; + frames: Array< + FunctionDisplayInfo & { + totalSamples: number; + totalPercentage: number; + selfSamples: number; + selfPercentage: number; + } + >; + }; +}; + +export type ThreadSamplesTopDownResult = { + type: 'thread-samples-top-down'; + threadHandle: string; + friendlyThreadName: string; + regularCallTree: CallTreeNode; +}; + +export type ThreadSamplesBottomUpResult = { + type: 'thread-samples-bottom-up'; + threadHandle: string; + friendlyThreadName: string; + invertedCallTree: CallTreeNode | null; +}; + +/** + * Scoring strategy for selecting which call tree nodes to include. + * The score determines node priority, with the constraint that child score ≤ parent score. + */ +export type CallTreeScoringStrategy = + | 'exponential-0.95' // totalPercentage * (0.95 ^ depth) - slow decay + | 'exponential-0.9' // totalPercentage * (0.9 ^ depth) - medium decay + | 'exponential-0.8' // totalPercentage * (0.8 ^ depth) - fast decay + | 'harmonic-0.1' // totalPercentage / (1 + 0.1 * depth) - very slow + | 'harmonic-0.5' // totalPercentage / (1 + 0.5 * depth) - medium + | 'harmonic-1.0' // totalPercentage / (1 + depth) - standard harmonic + | 'percentage-only'; // totalPercentage - no depth penalty + +export type CallTreeNode = FunctionDisplayInfo & { + callNodeIndex?: number; // Optional for root node + functionHandle?: string; // Optional for root node + functionIndex?: number; // Optional for root node + totalSamples: number; + totalPercentage: number; + selfSamples: number; + selfPercentage: number; + /** Original depth in tree before collapsing single-child chains */ + originalDepth: number; + children: CallTreeNode[]; + /** Information about truncated children, if any were omitted */ + childrenTruncated?: { + count: number; + combinedSamples: number; + combinedPercentage: number; + maxSamples: number; + maxPercentage: number; + depth: number; // Depth where children were truncated + }; +}; + +export type ThreadMarkersResult = { + type: 'thread-markers'; + threadHandle: string; + friendlyThreadName: string; + totalMarkerCount: number; + filteredMarkerCount: number; + filters?: { + searchString?: string; + minDuration?: number; + maxDuration?: number; + category?: string; + hasStack?: boolean; + limit?: number; + }; + byType: Array<{ + markerName: string; + count: number; + isInterval: boolean; + durationStats?: DurationStats; + rateStats?: RateStats; + topMarkers: TopMarker[]; + subGroups?: MarkerGroupData[]; + subGroupKey?: string; + }>; + byCategory: Array<{ + categoryName: string; + categoryIndex: number; + count: number; + percentage: number; + }>; + customGroups?: MarkerGroupData[]; +}; + +export type DurationStats = { + min: number; + max: number; + avg: number; + median: number; + p95: number; + p99: number; +}; + +export type 
RateStats = { + markersPerSecond: number; + minGap: number; + avgGap: number; + maxGap: number; +}; + +export type MarkerGroupData = { + groupName: string; + count: number; + isInterval: boolean; + durationStats?: DurationStats; + rateStats?: RateStats; + topMarkers: TopMarker[]; + subGroups?: MarkerGroupData[]; +}; + +export type ThreadFunctionsResult = { + type: 'thread-functions'; + threadHandle: string; + friendlyThreadName: string; + totalFunctionCount: number; + filteredFunctionCount: number; + filters?: { + searchString?: string; + minSelf?: number; + limit?: number; + }; + functions: Array< + { + functionHandle: string; + selfSamples: number; + selfPercentage: number; + totalSamples: number; + totalPercentage: number; + // Optional full profile percentages (present when zoomed) + fullSelfPercentage?: number; + fullTotalPercentage?: number; + } & FunctionDisplayInfo + >; +}; + +// ===== Marker Commands ===== + +export type MarkerInfoResult = { + type: 'marker-info'; + threadHandle: string; + friendlyThreadName: string; + markerHandle: string; + markerIndex: number; + name: string; + tooltipLabel?: string; + markerType?: string; + category: { + index: number; + name: string; + }; + start: number; + end: number | null; + duration?: number; + fields?: Array<{ + key: string; + label: string; + value: any; + formattedValue: string; + }>; + schema?: { + description?: string; + }; + stack?: StackTraceData; +}; + +export type MarkerStackResult = { + type: 'marker-stack'; + threadHandle: string; + friendlyThreadName: string; + markerHandle: string; + markerIndex: number; + markerName: string; + stack: StackTraceData | null; +}; + +export type StackTraceData = { + capturedAt?: number; + frames: FunctionDisplayInfo[]; + truncated: boolean; +}; + +// ===== Profile Commands ===== + +export type ProfileInfoResult = { + type: 'profile-info'; + name: string; + platform: string; + threadCount: number; + processCount: number; + processes: Array<{ + processIndex: number; + pid: string; + name: string; + cpuMs: number; + startTime?: number; + startTimeName?: string; + endTime?: number | null; + endTimeName?: string | null; + threads: Array<{ + threadIndex: number; + threadHandle: string; + name: string; + cpuMs: number; + }>; + remainingThreads?: { + count: number; + combinedCpuMs: number; + maxCpuMs: number; + }; + }>; + remainingProcesses?: { + count: number; + combinedCpuMs: number; + maxCpuMs: number; + }; + cpuActivity: Array<{ + startTime: number; + startTimeName: string; + startTimeStr: string; + endTime: number; + endTimeName: string; + endTimeStr: string; + cpuMs: number; + depthLevel: number; + }> | null; +}; diff --git a/src/profile-query/webpack.config.js b/src/profile-query/webpack.config.js new file mode 100644 index 0000000000..6b3af921d5 --- /dev/null +++ b/src/profile-query/webpack.config.js @@ -0,0 +1,41 @@ +const path = require('path'); +const projectRoot = path.join(__dirname, '../..'); +const includes = [path.join(projectRoot, 'src')]; + +module.exports = { + name: 'profile-query', + target: 'node', + mode: process.env.NODE_ENV, + resolve: { + extensions: ['.js', '.jsx', '.ts', '.tsx'], + }, + output: { + path: path.resolve(projectRoot, 'dist'), + filename: 'profile-query.js', + library: { + type: 'commonjs2', + }, + globalObject: 'this', + }, + entry: './src/profile-query/index.ts', + module: { + rules: [ + { + test: /\.(js|ts|tsx)$/, + use: ['babel-loader'], + include: includes, + }, + { + test: /\.svg$/, + type: 'asset/resource', + }, + ], + }, + experiments: { + // Make 
WebAssembly work just like in webpack v4 + syncWebAssembly: true, + }, + optimization: { + minimize: false, + }, +}; diff --git a/src/selectors/per-thread/thread.tsx b/src/selectors/per-thread/thread.tsx index aa3dbbe94c..e43a3b789e 100644 --- a/src/selectors/per-thread/thread.tsx +++ b/src/selectors/per-thread/thread.tsx @@ -183,6 +183,22 @@ export function getBasicThreadSelectorsPerThread( } ); + /** + * Get activity slices for the range-filtered thread (respecting zoom). + * This shows CPU activity only for the samples within the committed range. + */ + const getRangeFilteredActivitySlices: Selector = + createSelector(getRangeFilteredThread, (thread) => { + const samples = thread.samples; + return samples.threadCPURatio + ? getSlices( + [0.05, 0.2, 0.4, 0.6, 0.8], + samples.threadCPURatio, + samples.time + ) + : null; + }); + /** * The CallTreeSummaryStrategy determines how the call tree summarizes the * the current thread. By default, this is done by timing, but other @@ -400,6 +416,7 @@ export function getBasicThreadSelectorsPerThread( getThread, getSamplesTable, getActivitySlices, + getRangeFilteredActivitySlices, getSamplesWeightType, getNativeAllocations, getJsAllocations, diff --git a/src/selectors/profile.ts b/src/selectors/profile.ts index a8d9d7cd95..db135ddc99 100644 --- a/src/selectors/profile.ts +++ b/src/selectors/profile.ts @@ -756,6 +756,22 @@ export const getCombinedThreadCPUData: Selector = + createSelector( + getAllThreadsSamplesTables, + getCommittedRange, + (samplesTables, range) => + CombinedCPU.combineCPUDataFromThreads( + samplesTables, + range.start, + range.end + ) + ); + /** * Get activity slices for the combined CPU usage across all threads. * Returns hierarchical slices showing periods of high combined CPU activity, @@ -774,6 +790,23 @@ export const getCombinedThreadActivitySlices: Selector = ); }); +/** + * Get activity slices for the combined CPU usage, filtered to the committed range. + * This respects zoom and shows only activity within the current view. + */ +export const getRangeFilteredCombinedThreadActivitySlices: Selector = + createSelector(getRangeFilteredCombinedThreadCPUData, (combinedCPU) => { + if (combinedCPU === null) { + return null; + } + const m = Math.ceil(combinedCPU.maxCpuRatio); + return getSlices( + [0.05 * m, 0.2 * m, 0.4 * m, 0.6 * m, 0.8 * m], + combinedCPU.cpuRatio, + combinedCPU.time + ); + }); + /** * Get the pages array and construct a Map of pages that we can use to get the * relationships of tabs. The constructed map is `Map`. diff --git a/src/test/unit/profile-query-cli/__snapshots__/call-tree-formatting.test.ts.snap b/src/test/unit/profile-query-cli/__snapshots__/call-tree-formatting.test.ts.snap new file mode 100644 index 0000000000..6e7e0a7fd6 --- /dev/null +++ b/src/test/unit/profile-query-cli/__snapshots__/call-tree-formatting.test.ts.snap @@ -0,0 +1,318 @@ +// Jest Snapshot v1, https://jestjs.io/docs/snapshot-testing + +exports[`call tree formatting bottom-up view complex nested trees formats a deep call chain inverted 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Bottom-Up Call Tree: +f-1. Idle [total: 50.0%, self: 50.0%] +└─ f-5. Loop [total: 50.0%, self: 0.0%] + f-6. Main [total: 50.0%, self: 0.0%] +f-2. Think [total: 25.0%, self: 25.0%] +└─ f-7. AI [total: 25.0%, self: 0.0%] + f-4. Tick [total: 25.0%, self: 0.0%] + f-5. Loop [total: 25.0%, self: 0.0%] + f-6. Main [total: 25.0%, self: 0.0%] +f-3. Phys [total: 25.0%, self: 25.0%] +└─ f-4. 
Tick [total: 25.0%, self: 0.0%] + f-5. Loop [total: 25.0%, self: 0.0%] + f-6. Main [total: 25.0%, self: 0.0%]" +`; + +exports[`call tree formatting bottom-up view complex nested trees shows which functions call a leaf function 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Bottom-Up Call Tree: +f-1. E [total: 100.0%, self: 100.0%] +└─ f-2. D [total: 100.0%, self: 0.0%] + ├─ f-3. A [total: 33.3%, self: 0.0%] + ├─ f-4. B [total: 33.3%, self: 0.0%] + └─ f-5. C [total: 33.3%, self: 0.0%]" +`; + +exports[`call tree formatting bottom-up view different scoring strategies exponential-0.9 strategy for bottom-up 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Bottom-Up Call Tree: +f-1. G [total: 20.0%, self: 20.0%] +└─ f-7. D [total: 20.0%, self: 0.0%] + └─ ... (1 more children: combined 20.0%, max 20.0%) +f-2. E [total: 20.0%, self: 20.0%] +└─ f-6. A [total: 20.0%, self: 0.0%] +f-3. F [total: 20.0%, self: 20.0%] +└─ f-6. A [total: 20.0%, self: 0.0%] +f-4. B [total: 20.0%, self: 20.0%] +f-5. C [total: 20.0%, self: 20.0%]" +`; + +exports[`call tree formatting bottom-up view elision bugs each parent node should have at most one elision marker 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Top-Down Call Tree: +f-1. A [total: 100.0%, self: 0.0%] +└─ f-2. B [total: 100.0%, self: 0.0%] + f-3. C [total: 100.0%, self: 0.0%] + └─ ... (1 more children: combined 100.0%, max 100.0%)" +`; + +exports[`call tree formatting bottom-up view elision bugs elided children percentages should be relative to parent, not full profile 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Bottom-Up Call Tree: +f-1. B [total: 50.0%, self: 50.0%] +└─ ... (5 more children: combined 50.0%, max 10.0%) +f-2. D [total: 50.0%, self: 50.0%] +└─ f-3. C [total: 50.0%, self: 0.0%]" +`; + +exports[`call tree formatting bottom-up view elision bugs node whose children were never expanded must still show elision marker 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Top-Down Call Tree: +f-1. Root [total: 100.0%, self: 0.0%] +├─ f-2. A [total: 60.0%, self: 0.0%] +│ ├─ f-4. A2 [total: 10.0%, self: 10.0%] +│ └─ ... (5 more children: combined 50.0%, max 10.0%) +├─ f-3. B [total: 20.0%, self: 0.0%] +│ └─ ... (2 more children: combined 20.0%, max 10.0%) +└─ ... (2 more children: combined 20.0%, max 10.0%)" +`; + +exports[`call tree formatting bottom-up view elision bugs sibling nodes with elided children should each show their own elision marker 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Top-Down Call Tree: +f-1. A [total: 100.0%, self: 0.0%] +├─ f-2. B1 [total: 50.0%, self: 0.0%] +│ ├─ f-4. C5 [total: 10.0%, self: 10.0%] +│ └─ ... (4 more children: combined 40.0%, max 10.0%) +└─ f-3. B2 [total: 50.0%, self: 0.0%] + └─ ... (5 more children: combined 50.0%, max 10.0%)" +`; + +exports[`call tree formatting bottom-up view simple trees formats a branching tree inverted 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Bottom-Up Call Tree: +f-1. D [total: 28.6%, self: 28.6%] +└─ f-5. A [total: 28.6%, self: 0.0%] +f-2. E [total: 28.6%, self: 28.6%] +└─ f-5. A [total: 28.6%, self: 0.0%] +f-3. B [total: 28.6%, self: 28.6%] +f-4. 
C [total: 14.3%, self: 14.3%]" +`; + +exports[`call tree formatting bottom-up view simple trees formats a simple linear tree inverted 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Bottom-Up Call Tree: +f-1. D [total: 100.0%, self: 100.0%] +└─ f-2. C [total: 100.0%, self: 0.0%] + f-3. B [total: 100.0%, self: 0.0%] + f-4. A [total: 100.0%, self: 0.0%]" +`; + +exports[`call tree formatting bottom-up view trees with truncation shows elided callers at multiple levels 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Bottom-Up Call Tree: +f-1. E [total: 25.0%, self: 25.0%] +└─ f-5. B [total: 25.0%, self: 0.0%] + f-4. A [total: 25.0%, self: 0.0%] +f-2. F [total: 25.0%, self: 25.0%] +└─ f-5. B [total: 25.0%, self: 0.0%] + f-4. A [total: 25.0%, self: 0.0%] +f-3. D [total: 25.0%, self: 25.0%] +└─ f-4. A [total: 25.0%, self: 0.0%]" +`; + +exports[`call tree formatting bottom-up view trees with truncation shows elided callers with correct percentages 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Bottom-Up Call Tree: +f-1. B [total: 30.0%, self: 30.0%] +└─ f-4. A [total: 30.0%, self: 0.0%] +f-2. C [total: 20.0%, self: 20.0%] +└─ f-4. A [total: 20.0%, self: 0.0%] +f-3. D [total: 10.0%, self: 10.0%] +└─ ... (1 more children: combined 10.0%, max 10.0%)" +`; + +exports[`call tree formatting top-down view complex nested trees formats a complex tree with mixed branching patterns 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Top-Down Call Tree: +f-1. Main [total: 100.0%, self: 0.0%] +├─ f-2. Loop [total: 90.0%, self: 0.0%] +│ ├─ f-4. Tick [total: 40.0%, self: 0.0%] +│ │ ├─ f-11. AI [total: 20.0%, self: 0.0%] +│ │ │ f-13. Think [total: 20.0%, self: 20.0%] +│ │ └─ f-12. Phys [total: 20.0%, self: 20.0%] +│ ├─ f-5. Idle [total: 20.0%, self: 20.0%] +│ ├─ f-6. Render [total: 10.0%, self: 10.0%] +│ ├─ f-7. Rende [total: 10.0%, self: 0.0%] +│ │ f-10. Layou [total: 10.0%, self: 10.0%] +│ └─ f-8. r Render [total: 10.0%, self: 0.0%] +│ f-9. t Layout [total: 10.0%, self: 10.0%] +└─ f-3. Init [total: 10.0%, self: 10.0%]" +`; + +exports[`call tree formatting top-down view complex nested trees formats a deep nested path with branching 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Top-Down Call Tree: +f-1. A [total: 80.0%, self: 0.0%] +├─ f-3. C [total: 60.0%, self: 0.0%] +│ ├─ f-5. E [total: 40.0%, self: 0.0%] +│ │ ├─ f-7. G [total: 20.0%, self: 0.0%] +│ │ │ f-9. I [total: 20.0%, self: 20.0%] +│ │ └─ f-8. H [total: 20.0%, self: 20.0%] +│ └─ f-6. F [total: 20.0%, self: 20.0%] +└─ f-4. D [total: 20.0%, self: 20.0%] +f-2. B [total: 20.0%, self: 20.0%]" +`; + +exports[`call tree formatting top-down view different scoring strategies exponential-0.9 strategy output 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Top-Down Call Tree: +f-1. A [total: 60.0%, self: 0.0%] +├─ f-4. D [total: 20.0%, self: 0.0%] +│ f-7. G [total: 20.0%, self: 20.0%] +├─ f-5. E [total: 20.0%, self: 20.0%] +└─ f-6. F [total: 20.0%, self: 20.0%] +f-2. B [total: 20.0%, self: 20.0%] +f-3. C [total: 20.0%, self: 20.0%]" +`; + +exports[`call tree formatting top-down view different scoring strategies percentage-only strategy output 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Top-Down Call Tree: +f-1. 
A [total: 60.0%, self: 0.0%] +├─ f-4. D [total: 20.0%, self: 0.0%] +│ f-7. G [total: 20.0%, self: 20.0%] +├─ f-5. E [total: 20.0%, self: 20.0%] +└─ f-6. F [total: 20.0%, self: 20.0%] +f-2. B [total: 20.0%, self: 20.0%] +f-3. C [total: 20.0%, self: 20.0%]" +`; + +exports[`call tree formatting top-down view ordering and percentages correctly calculates percentages for nested nodes 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Top-Down Call Tree: +f-1. A [total: 100.0%, self: 0.0%] +├─ f-2. B [total: 60.0%, self: 10.0%] +│ ├─ f-5. E [total: 30.0%, self: 30.0%] +│ └─ f-6. F [total: 20.0%, self: 20.0%] +├─ f-3. C [total: 20.0%, self: 20.0%] +└─ f-4. D [total: 20.0%, self: 20.0%]" +`; + +exports[`call tree formatting top-down view ordering and percentages maintains correct ordering by sample count 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Top-Down Call Tree: +f-1. A [total: 100.0%, self: 0.0%] +├─ f-2. B [total: 50.0%, self: 50.0%] +├─ f-3. C [total: 30.0%, self: 30.0%] +└─ f-4. D [total: 20.0%, self: 20.0%]" +`; + +exports[`call tree formatting top-down view simple trees formats a branching tree 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Top-Down Call Tree: +f-1. A [total: 57.1%, self: 0.0%] +├─ f-4. D [total: 28.6%, self: 28.6%] +└─ f-5. E [total: 28.6%, self: 28.6%] +f-2. B [total: 28.6%, self: 28.6%] +f-3. C [total: 14.3%, self: 14.3%]" +`; + +exports[`call tree formatting top-down view simple trees formats a simple linear tree 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Top-Down Call Tree: +f-1. A [total: 100.0%, self: 0.0%] +└─ f-2. B [total: 100.0%, self: 0.0%] + f-3. C [total: 100.0%, self: 0.0%] + f-4. D [total: 100.0%, self: 100.0%]" +`; + +exports[`call tree formatting top-down view trees with truncation shows elided children at multiple levels 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Top-Down Call Tree: +f-1. A [total: 100.0%, self: 0.0%] +├─ f-2. B [total: 50.0%, self: 0.0%] +│ └─ ... (2 more children: combined 50.0%, max 25.0%) +├─ f-3. C [total: 25.0%, self: 0.0%] +│ └─ ... (2 more children: combined 25.0%, max 12.5%) +└─ f-4. D [total: 25.0%, self: 25.0%]" +`; + +exports[`call tree formatting top-down view trees with truncation shows elided children with correct percentages 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Top-Down Call Tree: +f-1. A [total: 100.0%, self: 0.0%] +├─ f-2. B [total: 30.0%, self: 30.0%] +└─ ... (6 more children: combined 70.0%, max 20.0%)" +`; + +exports[`call tree formatting top-down view trees with truncation shows truncation with wide trees (many siblings) 1`] = ` +"[Thread: t-0 (Test Thread) | View: Full profile | Full: 1.00s] + +Thread: Test Thread + +Top-Down Call Tree: +f-1. A [total: 100.0%, self: 0.0%] +├─ f-2. B [total: 8.3%, self: 8.3%] +├─ f-3. I [total: 8.3%, self: 8.3%] +├─ f-4. J [total: 8.3%, self: 8.3%] +├─ f-5. K [total: 8.3%, self: 8.3%] +└─ ... 
(8 more children: combined 66.7%, max 8.3%)" +`; diff --git a/src/test/unit/profile-query-cli/call-tree-formatting.test.ts b/src/test/unit/profile-query-cli/call-tree-formatting.test.ts new file mode 100644 index 0000000000..a295951306 --- /dev/null +++ b/src/test/unit/profile-query-cli/call-tree-formatting.test.ts @@ -0,0 +1,616 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { FunctionMap } from '../../../profile-query/function-map'; +import { collectCallTree } from '../../../profile-query/formatters/call-tree'; +import type { + ThreadSamplesTopDownResult, + ThreadSamplesBottomUpResult, + SessionContext, + WithContext, +} from '../../../profile-query/types'; +import { getProfileFromTextSamples } from '../../fixtures/profiles/processed-profile'; +import { storeWithProfile } from '../../fixtures/stores'; +import { getThreadSelectors } from 'firefox-profiler/selectors/per-thread'; +import { + formatThreadSamplesTopDownResult, + formatThreadSamplesBottomUpResult, +} from '../../../profile-query-cli/formatters'; +import type { CallTreeCollectionOptions } from '../../../profile-query/formatters/call-tree'; +import { + getCallTree, + computeCallTreeTimings, + computeCallNodeSelfAndSummary, +} from 'firefox-profiler/profile-logic/call-tree'; +import { getInvertedCallNodeInfo } from 'firefox-profiler/profile-logic/profile-data'; +import { + getCategories, + getDefaultCategory, +} from 'firefox-profiler/selectors/profile'; + +/** + * Helper to create a mock session context for testing. + */ +function createMockContext(): SessionContext { + return { + selectedThreadHandle: 't-0', + selectedThreads: [{ threadIndex: 0, name: 'Test Thread' }], + currentViewRange: null, + rootRange: { start: 0, end: 1000 }, + }; +} + +/** + * Helper to build a ThreadSamplesTopDownResult from a profile. + */ +function buildTopDownResult( + profileSamples: string, + options: CallTreeCollectionOptions = {} +): WithContext { + const { profile } = getProfileFromTextSamples(profileSamples); + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const callTree = threadSelectors.getCallTree(state); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + const regularCallTree = collectCallTree( + callTree, + functionMap, + threadIndexes, + libs, + options + ); + + return { + type: 'thread-samples-top-down', + threadHandle: 't-0', + friendlyThreadName: 'Test Thread', + regularCallTree, + context: createMockContext(), + }; +} + +/** + * Helper to build a ThreadSamplesBottomUpResult from a profile. 
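+ * Unlike buildTopDownResult, this builds the inverted (bottom-up) tree by
+ * hand: it derives per-node self times from the filtered samples, inverts
+ * the call node info, recomputes the timings, and then collects the
+ * resulting call tree.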
+ */ +function buildBottomUpResult( + profileSamples: string, + options: CallTreeCollectionOptions = {} +): WithContext { + const { profile } = getProfileFromTextSamples(profileSamples); + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + // Build inverted call tree (bottom-up view) + let collectedInvertedTree = null; + try { + const thread = threadSelectors.getFilteredThread(state); + const callNodeInfo = threadSelectors.getCallNodeInfo(state); + const categories = getCategories(state); + const defaultCategory = getDefaultCategory(state); + const weightType = threadSelectors.getWeightTypeForCallTree(state); + const samples = threadSelectors.getPreviewFilteredCtssSamples(state); + const sampleIndexToCallNodeIndex = + threadSelectors.getSampleIndexToNonInvertedCallNodeIndexForFilteredThread( + state + ); + + const callNodeSelfAndSummary = computeCallNodeSelfAndSummary( + samples, + sampleIndexToCallNodeIndex, + callNodeInfo.getCallNodeTable().length + ); + + const invertedCallNodeInfo = getInvertedCallNodeInfo( + callNodeInfo, + defaultCategory, + thread.funcTable.length + ); + + const invertedTimings = computeCallTreeTimings( + invertedCallNodeInfo, + callNodeSelfAndSummary + ); + + const invertedTree = getCallTree( + thread, + invertedCallNodeInfo, + categories, + invertedTimings, + weightType + ); + + collectedInvertedTree = collectCallTree( + invertedTree, + functionMap, + threadIndexes, + libs, + options + ); + } catch (e) { + // Failed to create inverted tree + console.error('Failed to create inverted call tree:', e); + } + + return { + type: 'thread-samples-bottom-up', + threadHandle: 't-0', + friendlyThreadName: 'Test Thread', + invertedCallTree: collectedInvertedTree, + context: createMockContext(), + }; +} + +describe('call tree formatting', function () { + describe('top-down view', function () { + describe('simple trees', function () { + it('formats a simple linear tree', function () { + const result = buildTopDownResult( + ` + A + B + C + D + `, + { maxNodes: 10 } + ); + + const formatted = formatThreadSamplesTopDownResult(result); + expect(formatted).toMatchSnapshot(); + }); + + it('formats a branching tree', function () { + const result = buildTopDownResult( + ` + A A A A B B C + D D E E + `, + { maxNodes: 10 } + ); + + const formatted = formatThreadSamplesTopDownResult(result); + expect(formatted).toMatchSnapshot(); + }); + }); + + describe('trees with truncation', function () { + it('shows elided children with correct percentages', function () { + const result = buildTopDownResult( + ` + A A A A A A A A A A + B B B C C D E F G H + `, + { maxNodes: 2 } + ); + + const formatted = formatThreadSamplesTopDownResult(result); + expect(formatted).toMatchSnapshot(); + }); + + it('shows elided children at multiple levels', function () { + const result = buildTopDownResult( + ` + A A A A A A A A + B B B B C C D D + E E F F G H + `, + { maxNodes: 4 } + ); + + const formatted = formatThreadSamplesTopDownResult(result); + expect(formatted).toMatchSnapshot(); + }); + + it('shows truncation with wide trees (many siblings)', function () { + const result = buildTopDownResult( + ` + A A A A A A A A A A A A + B C D E F G H I J K L M + `, + { maxNodes: 5, maxChildrenPerNode: 10 } + ); + + const formatted = formatThreadSamplesTopDownResult(result); + expect(formatted).toMatchSnapshot(); + }); + }); + + 
describe('complex nested trees', function () { + it('formats a deep nested path with branching', function () { + const result = buildTopDownResult( + ` + A A A A A A A A B B + C C C C C C D D + E E E E F F + G G H H + I I + `, + { maxNodes: 15 } + ); + + const formatted = formatThreadSamplesTopDownResult(result); + expect(formatted).toMatchSnapshot(); + }); + + it('formats a complex tree with mixed branching patterns', function () { + const result = buildTopDownResult( + ` + Main Main Main Main Main Main Main Main Main Main + Init Loop Loop Loop Loop Loop Loop Loop Loop Loop + Tick Tick Tick Tick Idle Idle Render Render Render + AI AI Phys Phys Layout Layout + Think Think + `, + { maxNodes: 15 } + ); + + const formatted = formatThreadSamplesTopDownResult(result); + expect(formatted).toMatchSnapshot(); + }); + }); + + describe('ordering and percentages', function () { + it('maintains correct ordering by sample count', function () { + const result = buildTopDownResult( + ` + A A A A A A A A A A + B B B B B C C C D D + `, + { maxNodes: 10 } + ); + + const formatted = formatThreadSamplesTopDownResult(result); + expect(formatted).toMatchSnapshot(); + + // Verify ordering in the result structure + const aNode = result.regularCallTree.children[0]; + expect(aNode.children[0].name).toBe('B'); // 5 samples + expect(aNode.children[1].name).toBe('C'); // 3 samples + expect(aNode.children[2].name).toBe('D'); // 2 samples + }); + + it('correctly calculates percentages for nested nodes', function () { + const result = buildTopDownResult( + ` + A A A A A A A A A A + B B B B B B C C D D + E E E F F + `, + { maxNodes: 20 } + ); + + const formatted = formatThreadSamplesTopDownResult(result); + expect(formatted).toMatchSnapshot(); + + // Verify percentages + const aNode = result.regularCallTree.children[0]; + expect(aNode.totalPercentage).toBeCloseTo(100, 0); + + const bNode = aNode.children[0]; + expect(bNode.totalPercentage).toBeCloseTo(60, 0); + + const eNode = bNode.children[0]; + expect(eNode.totalPercentage).toBeCloseTo(30, 0); + }); + }); + + describe('different scoring strategies', function () { + it('exponential-0.9 strategy output', function () { + const result = buildTopDownResult( + ` + A A A A A A B B C C + D D E E F F + G G + `, + { maxNodes: 8, scoringStrategy: 'exponential-0.9' } + ); + + const formatted = formatThreadSamplesTopDownResult(result); + expect(formatted).toMatchSnapshot(); + }); + + it('percentage-only strategy output', function () { + const result = buildTopDownResult( + ` + A A A A A A B B C C + D D E E F F + G G + `, + { maxNodes: 8, scoringStrategy: 'percentage-only' } + ); + + const formatted = formatThreadSamplesTopDownResult(result); + expect(formatted).toMatchSnapshot(); + }); + }); + }); + + describe('bottom-up view', function () { + describe('simple trees', function () { + it('formats a simple linear tree inverted', function () { + const result = buildBottomUpResult( + ` + A + B + C + D + `, + { maxNodes: 10 } + ); + + const formatted = formatThreadSamplesBottomUpResult(result); + expect(formatted).toMatchSnapshot(); + }); + + it('formats a branching tree inverted', function () { + const result = buildBottomUpResult( + ` + A A A A B B C + D D E E + `, + { maxNodes: 10 } + ); + + const formatted = formatThreadSamplesBottomUpResult(result); + expect(formatted).toMatchSnapshot(); + }); + }); + + describe('trees with truncation', function () { + it('shows elided callers with correct percentages', function () { + const result = buildBottomUpResult( + ` + A A A A A A A A A A + 
B B B C C D E F G H + `, + { maxNodes: 5 } + ); + + const formatted = formatThreadSamplesBottomUpResult(result); + expect(formatted).toMatchSnapshot(); + }); + + it('shows elided callers at multiple levels', function () { + const result = buildBottomUpResult( + ` + A A A A A A A A + B B B B C C D D + E E F F G H + `, + { maxNodes: 8 } + ); + + const formatted = formatThreadSamplesBottomUpResult(result); + expect(formatted).toMatchSnapshot(); + }); + }); + + describe('complex nested trees', function () { + it('formats a deep call chain inverted', function () { + const result = buildBottomUpResult( + ` + Main Main Main Main Main Main Main Main + Loop Loop Loop Loop Loop Loop Loop Loop + Tick Tick Tick Tick Idle Idle Idle Idle + AI AI Phys Phys + Think Think + `, + { maxNodes: 15 } + ); + + const formatted = formatThreadSamplesBottomUpResult(result); + expect(formatted).toMatchSnapshot(); + }); + + it('shows which functions call a leaf function', function () { + const result = buildBottomUpResult( + ` + A A B B C C + D D D D D D + E E E E E E + `, + { maxNodes: 10 } + ); + + const formatted = formatThreadSamplesBottomUpResult(result); + expect(formatted).toMatchSnapshot(); + }); + }); + + describe('different scoring strategies', function () { + it('exponential-0.9 strategy for bottom-up', function () { + const result = buildBottomUpResult( + ` + A A A A A A B B C C + D D E E F F + G G + `, + { maxNodes: 8, scoringStrategy: 'exponential-0.9' } + ); + + const formatted = formatThreadSamplesBottomUpResult(result); + expect(formatted).toMatchSnapshot(); + }); + }); + + describe('elision bugs', function () { + it('elided children percentages should be relative to parent, not full profile', function () { + // Create a tree where B represents 50% of samples (5 out of 10). + // B has multiple callers (A1, A2, A3, A4, A5) that will be truncated. + // The elided caller percentages should be relative to B's total (50%), + // not relative to the full profile (100%). 
+ const result = buildBottomUpResult( + ` + A1 A2 A3 A4 A5 C C C C C + B B B B B D D D D D + `, + { maxNodes: 3 } + ); + + const formatted = formatThreadSamplesBottomUpResult(result); + expect(formatted).toMatchSnapshot(); + + // Verify the bug: currently elided percentages are calculated relative to full profile + expect(result.invertedCallTree).toBeDefined(); + const bNode = result.invertedCallTree!.children.find( + (n) => n.name === 'B' + ); + expect(bNode).toBeDefined(); + + // B should have truncated children since we have limited nodes + // With the bug, the elided callers show as % of full profile (10 samples) + // After fix, they should show as % of B's total (5 samples = 50% of profile) + // The elided callers combined should be close to 100% of B's total, + // but with the bug they'll show as ~50% (or less depending on which callers were included) + + // For now, the snapshot will capture the buggy behavior + // After fix, we'll update snapshots and add more specific assertions + }); + + it('each parent node should have at most one elision marker', function () { + // Create a tree where a single parent has both depth limit and truncation + const result = buildTopDownResult( + ` + A A A A A A A A A A + B B B B B B B B B B + C C C C C C C C C C + D D D D D D D D D D + E E E E E E E E E E + F F F F F F F F F F + `, + { maxNodes: 3, maxDepth: 3 } + ); + + const formatted = formatThreadSamplesTopDownResult(result); + expect(formatted).toMatchSnapshot(); + + // Verify that each parent has at most one elision marker + // Count consecutive elision markers (which would indicate duplicates for same parent) + const lines = formatted.split('\n'); + let consecutiveElisionCount = 0; + let maxConsecutiveElisions = 0; + + for (const line of lines) { + if (line.includes('└─ ...')) { + consecutiveElisionCount++; + maxConsecutiveElisions = Math.max( + maxConsecutiveElisions, + consecutiveElisionCount + ); + } else if (line.trim().length > 0) { + consecutiveElisionCount = 0; + } + } + + // Should never have more than 1 consecutive elision marker + expect(maxConsecutiveElisions).toBeLessThanOrEqual(1); + }); + + it('sibling nodes with elided children should each show their own elision marker', function () { + // Create a tree where two sibling nodes each have elided children + // This tests that elision markers are per-parent, not per-indentation-level + const result = buildTopDownResult( + ` + A A A A A A A A A A + B1 B1 B1 B1 B1 B2 B2 B2 B2 B2 + C1 C2 C3 C4 C5 D1 D2 D3 D4 D5 + `, + { maxNodes: 4 } + ); + + const formatted = formatThreadSamplesTopDownResult(result); + expect(formatted).toMatchSnapshot(); + + // Count how many elision markers appear in the output + const lines = formatted.split('\n'); + const elisionMarkerCount = lines.filter((line) => + line.includes('└─ ...') + ).length; + + // We expect at least 2 elision markers (one for each sibling B1 and B2) + // Both have many children but limited maxNodes, so both should have elisions + expect(elisionMarkerCount).toBeGreaterThanOrEqual(2); + }); + + it('node whose children were never expanded must still show elision marker', function () { + // Reproduce bug where CallWindowProcW has 55.8% total, 0% self, but no elision marker + // This happens when a node is included but hits the budget limit before its children are expanded + const result = buildTopDownResult( + ` + Root Root Root Root Root Root Root Root Root Root + A A A A A A B B C D + A1 A2 A3 A4 A5 A6 B1 B2 + `, + { maxNodes: 4, maxChildrenPerNode: 2 } // Very tight: Root, A, B, C (A 
never expanded) + ); + + const formatted = formatThreadSamplesTopDownResult(result); + expect(formatted).toMatchSnapshot(); + + // Parse the tree and verify invariant: every node with total > self must show where the time went + const lines = formatted.split('\n'); + const violations: string[] = []; + + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + // Match node lines like "├─ f-2. A [total: 50.0%, self: 0.0%]" or "f-2. A [total: 50.0%, self: 0.0%]" + const match = line.match( + /[├└]?─?\s*f-\d+\.\s+(.+?)\s+\[total:\s+([\d.]+)%,\s+self:\s+([\d.]+)%\]/ + ); + if (match) { + const nodeName = match[1]; + const total = parseFloat(match[2]); + const self = parseFloat(match[3]); + + // If total > self, this node has children that account for the difference + if (total > self + 0.01) { + // Check the next line - it must be either a child node or an elision marker + const nextLine = i + 1 < lines.length ? lines[i + 1] : ''; + + // A child line either: + // 1. Starts with more whitespace than current line (deeper nesting) + // 2. Contains tree symbols │, ├─, or └─ + // 3. Contains an elision marker └─ ... + + const currentLeadingSpaces = + line.match(/^(\s*)/)?.[1].length || 0; + const nextLeadingSpaces = + nextLine.match(/^(\s*)/)?.[1].length || 0; + + const hasTreeSymbols = + nextLine.includes('│') || + nextLine.includes('├─') || + nextLine.includes('└─'); + + const isChild = + nextLine.trim().length > 0 && + (nextLeadingSpaces > currentLeadingSpaces || hasTreeSymbols); + + if (!isChild) { + violations.push( + `Node "${nodeName}" has total=${total}%, self=${self}% but no child/elision marker:\n Line ${i + 1}: ${line}\n Next: ${nextLine}` + ); + } + } + } + } + + // Report all violations + if (violations.length > 0) { + throw new Error( + `Found ${violations.length} node(s) missing elision markers:\n\n` + + violations.join('\n\n') + ); + } + }); + }); + }); +}); diff --git a/src/test/unit/profile-query-cli/client.test.ts b/src/test/unit/profile-query-cli/client.test.ts new file mode 100644 index 0000000000..cbc5831b16 --- /dev/null +++ b/src/test/unit/profile-query-cli/client.test.ts @@ -0,0 +1,28 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/** + * Unit tests for profile-query-cli client. + * + * NOTE: This file intentionally contains no tests. + * + * The client.ts module handles cross-process communication via Unix sockets, + * which requires spawning separate daemon processes. This is too complex to + * manage reliably in Jest unit tests. + * + * Instead, client functionality is tested through integration tests in bash + * scripts: + * - bin/pq-test: Basic daemon lifecycle and client-server communication + * - bin/pq-test-multi: Concurrent client sessions + * + * Do not add unit tests here. If you need to test pure utility functions from + * client.ts, extract them to a separate module and test that module instead. 
+ */ + +describe('profile-query-cli client', function () { + it('has no unit tests (see comment above)', function () { + // This test exists only to prevent Jest from complaining about an empty suite + expect(true).toBe(true); + }); +}); diff --git a/src/test/unit/profile-query-cli/daemon.test.ts b/src/test/unit/profile-query-cli/daemon.test.ts new file mode 100644 index 0000000000..5f42834c75 --- /dev/null +++ b/src/test/unit/profile-query-cli/daemon.test.ts @@ -0,0 +1,28 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/** + * Unit tests for profile-query-cli daemon. + * + * NOTE: This file intentionally contains no tests. + * + * The daemon.ts module handles cross-process communication via Unix sockets, + * spawns background processes, and manages long-lived server state. This is + * too complex to manage reliably in Jest unit tests. + * + * Instead, daemon functionality is tested through integration tests in bash + * scripts: + * - bin/pq-test: Basic daemon lifecycle (start, connect, stop) + * - bin/pq-test-multi: Multiple concurrent daemon sessions + * + * Do not add unit tests here. If you need to test pure utility functions from + * daemon.ts, extract them to a separate module and test that module instead. + */ + +describe('profile-query-cli daemon', function () { + it('has no unit tests (see comment above)', function () { + // This test exists only to prevent Jest from complaining about an empty suite + expect(true).toBe(true); + }); +}); diff --git a/src/test/unit/profile-query-cli/session.test.ts b/src/test/unit/profile-query-cli/session.test.ts new file mode 100644 index 0000000000..c6ad3c7e79 --- /dev/null +++ b/src/test/unit/profile-query-cli/session.test.ts @@ -0,0 +1,355 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/** + * Unit tests for profile-query CLI session management. + * + * These tests cover only the session.ts utility functions. 
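+ * (Session state is tracked on disk as per-session socket, log and JSON
+ * metadata files plus a "current session" symlink; the tests below exercise
+ * those helpers against a temporary directory.)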
+ * Integration tests that spawn daemons and test IPC are in bash scripts: + * - bin/pq-test: Basic daemon lifecycle + * - bin/pq-test-multi: Concurrent sessions + */ + +import * as fs from 'fs'; +import * as path from 'path'; +import * as os from 'os'; +import { + ensureSessionDir, + generateSessionId, + getSocketPath, + getLogPath, + getMetadataPath, + saveSessionMetadata, + loadSessionMetadata, + setCurrentSession, + getCurrentSessionId, + getCurrentSocketPath, + isProcessRunning, + cleanupSession, + validateSession, + listSessions, +} from 'firefox-profiler/profile-query-cli/session'; +import type { SessionMetadata } from 'firefox-profiler/profile-query-cli/protocol'; + +const TEST_BUILD_HASH = 'test-build-hash'; + +describe('profile-query-cli session management', function () { + let testSessionDir: string; + + beforeEach(function () { + // Create a unique temp directory for each test + testSessionDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pq-test-')); + }); + + afterEach(function () { + // Clean up test directory + if (fs.existsSync(testSessionDir)) { + fs.rmSync(testSessionDir, { recursive: true, force: true }); + } + }); + + describe('ensureSessionDir', function () { + it('creates session directory if it does not exist', function () { + const newDir = path.join(testSessionDir, 'subdir'); + expect(fs.existsSync(newDir)).toBe(false); + + ensureSessionDir(newDir); + + expect(fs.existsSync(newDir)).toBe(true); + expect(fs.statSync(newDir).isDirectory()).toBe(true); + }); + + it('does not fail if directory already exists', function () { + ensureSessionDir(testSessionDir); + + expect(() => ensureSessionDir(testSessionDir)).not.toThrow(); + expect(fs.existsSync(testSessionDir)).toBe(true); + }); + }); + + describe('generateSessionId', function () { + it('returns a non-empty string', function () { + const sessionId = generateSessionId(); + expect(typeof sessionId).toBe('string'); + expect(sessionId.length).toBeGreaterThan(0); + }); + + it('returns different IDs on successive calls', function () { + const id1 = generateSessionId(); + const id2 = generateSessionId(); + expect(id1).not.toBe(id2); + }); + }); + + describe('path generation', function () { + it('getSocketPath returns correct path', function () { + const sessionId = 'test123'; + const socketPath = getSocketPath(testSessionDir, sessionId); + expect(socketPath).toBe(path.join(testSessionDir, 'test123.sock')); + }); + + it('getLogPath returns correct path', function () { + const sessionId = 'test123'; + const logPath = getLogPath(testSessionDir, sessionId); + expect(logPath).toBe(path.join(testSessionDir, 'test123.log')); + }); + + it('getMetadataPath returns correct path', function () { + const sessionId = 'test123'; + const metadataPath = getMetadataPath(testSessionDir, sessionId); + expect(metadataPath).toBe(path.join(testSessionDir, 'test123.json')); + }); + }); + + describe('metadata serialization', function () { + it('saves and loads metadata correctly', function () { + const metadata: SessionMetadata = { + id: 'test123', + socketPath: getSocketPath(testSessionDir, 'test123'), + logPath: getLogPath(testSessionDir, 'test123'), + pid: 12345, + profilePath: '/path/to/profile.json', + createdAt: '2025-10-31T10:00:00.000Z', + buildHash: TEST_BUILD_HASH, + }; + + saveSessionMetadata(testSessionDir, metadata); + + const loaded = loadSessionMetadata(testSessionDir, 'test123'); + expect(loaded).toEqual(metadata); + }); + + it('returns null for non-existent session', function () { + const loaded = loadSessionMetadata(testSessionDir, 
'nonexistent'); + expect(loaded).toBeNull(); + }); + + it('returns null for malformed JSON', function () { + const metadataPath = getMetadataPath(testSessionDir, 'bad'); + fs.writeFileSync(metadataPath, 'not valid JSON {'); + + const loaded = loadSessionMetadata(testSessionDir, 'bad'); + expect(loaded).toBeNull(); + }); + }); + + describe('current session tracking', function () { + it('sets and gets current session via symlink', function () { + const sessionId = 'test123'; + const socketPath = getSocketPath(testSessionDir, sessionId); + fs.writeFileSync(socketPath, ''); + + setCurrentSession(testSessionDir, sessionId); + + const currentId = getCurrentSessionId(testSessionDir); + expect(currentId).toBe(sessionId); + }); + + it('returns null when no current session exists', function () { + const currentId = getCurrentSessionId(testSessionDir); + expect(currentId).toBeNull(); + }); + + it('replaces existing current session symlink', function () { + // Create first session + const socket1 = getSocketPath(testSessionDir, 'session1'); + fs.writeFileSync(socket1, ''); + setCurrentSession(testSessionDir, 'session1'); + expect(getCurrentSessionId(testSessionDir)).toBe('session1'); + + // Create second session + const socket2 = getSocketPath(testSessionDir, 'session2'); + fs.writeFileSync(socket2, ''); + setCurrentSession(testSessionDir, 'session2'); + expect(getCurrentSessionId(testSessionDir)).toBe('session2'); + }); + + it('getCurrentSocketPath resolves to correct path', function () { + const sessionId = 'test123'; + const socketPath = getSocketPath(testSessionDir, sessionId); + fs.writeFileSync(socketPath, ''); + + setCurrentSession(testSessionDir, sessionId); + + const currentPath = getCurrentSocketPath(testSessionDir); + expect(currentPath).toBe(socketPath); + }); + }); + + describe('isProcessRunning', function () { + it('returns true for current process', function () { + expect(isProcessRunning(process.pid)).toBe(true); + }); + + it('returns false for non-existent PID', function () { + expect(isProcessRunning(999999)).toBe(false); + }); + }); + + describe('cleanupSession', function () { + it('removes socket and metadata files', function () { + const sessionId = 'test123'; + const socketPath = getSocketPath(testSessionDir, sessionId); + const metadataPath = getMetadataPath(testSessionDir, sessionId); + + fs.writeFileSync(socketPath, ''); + fs.writeFileSync(metadataPath, '{}'); + + cleanupSession(testSessionDir, sessionId); + + expect(fs.existsSync(socketPath)).toBe(false); + expect(fs.existsSync(metadataPath)).toBe(false); + }); + + it('preserves log file', function () { + const sessionId = 'test123'; + const logPath = getLogPath(testSessionDir, sessionId); + fs.writeFileSync(logPath, 'log data'); + + cleanupSession(testSessionDir, sessionId); + + expect(fs.existsSync(logPath)).toBe(true); + }); + + it('removes current session symlink if it points to this session', function () { + const sessionId = 'test123'; + const socketPath = getSocketPath(testSessionDir, sessionId); + fs.writeFileSync(socketPath, ''); + setCurrentSession(testSessionDir, sessionId); + + cleanupSession(testSessionDir, sessionId); + + expect(getCurrentSessionId(testSessionDir)).toBeNull(); + }); + + it('does not remove current session symlink if it points to different session', function () { + // Set current session to session1 + const socket1 = getSocketPath(testSessionDir, 'session1'); + fs.writeFileSync(socket1, ''); + setCurrentSession(testSessionDir, 'session1'); + + // Clean up session2 + cleanupSession(testSessionDir, 
'session2'); + + // Current session should still be session1 + expect(getCurrentSessionId(testSessionDir)).toBe('session1'); + }); + }); + + describe('validateSession', function () { + it('returns false for non-existent session', function () { + expect(validateSession(testSessionDir, 'nonexistent')).toBe(null); + }); + + it('returns false for session with dead PID', function () { + const sessionId = 'test123'; + const metadata: SessionMetadata = { + id: sessionId, + socketPath: getSocketPath(testSessionDir, sessionId), + logPath: getLogPath(testSessionDir, sessionId), + pid: 999999, // Non-existent PID + profilePath: '/path/to/profile.json', + createdAt: new Date().toISOString(), + buildHash: TEST_BUILD_HASH, + }; + + saveSessionMetadata(testSessionDir, metadata); + fs.writeFileSync(metadata.socketPath, ''); + + expect(validateSession(testSessionDir, sessionId)).toBe(null); + }); + + it('returns false for session with missing socket', function () { + const sessionId = 'test123'; + const metadata: SessionMetadata = { + id: sessionId, + socketPath: getSocketPath(testSessionDir, sessionId), + logPath: getLogPath(testSessionDir, sessionId), + pid: process.pid, // Use current process PID (guaranteed to exist) + profilePath: '/path/to/profile.json', + createdAt: new Date().toISOString(), + buildHash: TEST_BUILD_HASH, + }; + + saveSessionMetadata(testSessionDir, metadata); + // Intentionally don't create socket file + + expect(validateSession(testSessionDir, sessionId)).toBe(null); + }); + + it('returns true for valid session', function () { + const sessionId = 'test123'; + const metadata: SessionMetadata = { + id: sessionId, + socketPath: getSocketPath(testSessionDir, sessionId), + logPath: getLogPath(testSessionDir, sessionId), + pid: process.pid, // Use current process PID + profilePath: '/path/to/profile.json', + createdAt: new Date().toISOString(), + buildHash: TEST_BUILD_HASH, + }; + + saveSessionMetadata(testSessionDir, metadata); + fs.writeFileSync(metadata.socketPath, ''); + + expect(validateSession(testSessionDir, sessionId)).not.toBe(null); + }); + }); + + describe('listSessions', function () { + it('returns empty array when no sessions exist', function () { + const sessions = listSessions(testSessionDir); + expect(sessions).toEqual([]); + }); + + it('lists all session IDs', function () { + // Create multiple sessions + saveSessionMetadata(testSessionDir, { + id: 'session1', + socketPath: getSocketPath(testSessionDir, 'session1'), + logPath: getLogPath(testSessionDir, 'session1'), + pid: 1, + profilePath: '/test1.json', + createdAt: new Date().toISOString(), + buildHash: TEST_BUILD_HASH, + }); + + saveSessionMetadata(testSessionDir, { + id: 'session2', + socketPath: getSocketPath(testSessionDir, 'session2'), + logPath: getLogPath(testSessionDir, 'session2'), + pid: 2, + profilePath: '/test2.json', + createdAt: new Date().toISOString(), + buildHash: TEST_BUILD_HASH, + }); + + const sessions = listSessions(testSessionDir); + expect(sessions).toContain('session1'); + expect(sessions).toContain('session2'); + expect(sessions.length).toBe(2); + }); + + it('ignores non-JSON files', function () { + // Create session metadata + saveSessionMetadata(testSessionDir, { + id: 'session1', + socketPath: getSocketPath(testSessionDir, 'session1'), + logPath: getLogPath(testSessionDir, 'session1'), + pid: 1, + profilePath: '/test.json', + createdAt: new Date().toISOString(), + buildHash: TEST_BUILD_HASH, + }); + + // Create non-JSON files + fs.writeFileSync(path.join(testSessionDir, 'session1.sock'), 
''); + fs.writeFileSync(path.join(testSessionDir, 'session1.log'), ''); + fs.writeFileSync(path.join(testSessionDir, 'random.txt'), ''); + + const sessions = listSessions(testSessionDir); + expect(sessions).toEqual(['session1']); + }); + }); +}); diff --git a/src/test/unit/profile-query-marker-utils.test.ts b/src/test/unit/profile-query-marker-utils.test.ts new file mode 100644 index 0000000000..0752445966 --- /dev/null +++ b/src/test/unit/profile-query-marker-utils.test.ts @@ -0,0 +1,208 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { + computeDurationStats, + computeRateStats, + formatDuration, +} from '../../profile-query/formatters/marker-info'; + +import type { Marker } from 'firefox-profiler/types'; + +describe('marker-info utility functions', function () { + describe('formatDuration', function () { + it('formats microseconds correctly', function () { + expect(formatDuration(0.001)).toBe('1.00μs'); + expect(formatDuration(0.5)).toBe('500.00μs'); + expect(formatDuration(0.999)).toBe('999.00μs'); + }); + + it('formats milliseconds correctly', function () { + expect(formatDuration(1)).toBe('1.00ms'); + expect(formatDuration(10.5)).toBe('10.50ms'); + expect(formatDuration(999)).toBe('999.00ms'); + }); + + it('formats seconds correctly', function () { + expect(formatDuration(1000)).toBe('1.00s'); + expect(formatDuration(5500)).toBe('5.50s'); + expect(formatDuration(60000)).toBe('60.00s'); + }); + }); + + describe('computeDurationStats', function () { + function makeMarker(start: number, end: number | null): Marker { + return { + start, + end, + name: 'TestMarker', + category: 0, + data: null, + threadId: null, + }; + } + + it('returns undefined for empty marker list', function () { + expect(computeDurationStats([])).toBe(undefined); + }); + + it('returns undefined for instant markers only', function () { + const markers = [ + makeMarker(0, null), + makeMarker(1, null), + makeMarker(2, null), + ]; + expect(computeDurationStats(markers)).toBe(undefined); + }); + + it('computes stats for interval markers', function () { + const markers = [ + makeMarker(0, 1), // 1ms + makeMarker(1, 3), // 2ms + makeMarker(3, 6), // 3ms + makeMarker(6, 10), // 4ms + makeMarker(10, 15), // 5ms + ]; + + const stats = computeDurationStats(markers); + expect(stats).toBeDefined(); + expect(stats!.min).toBe(1); + expect(stats!.max).toBe(5); + expect(stats!.avg).toBe(3); + expect(stats!.median).toBe(3); + // For 5 items: p95 = floor(5 * 0.95) = floor(4.75) = 4th index (0-based) = 5 + expect(stats!.p95).toBe(5); + // For 5 items: p99 = floor(5 * 0.99) = floor(4.95) = 4th index (0-based) = 5 + expect(stats!.p99).toBe(5); + }); + + it('handles mixed instant and interval markers', function () { + const markers = [ + makeMarker(0, null), // instant + makeMarker(1, 2), // 1ms + makeMarker(2, null), // instant + makeMarker(3, 5), // 2ms + ]; + + const stats = computeDurationStats(markers); + expect(stats).toBeDefined(); + expect(stats!.min).toBe(1); + expect(stats!.max).toBe(2); + expect(stats!.avg).toBe(1.5); + }); + + it('computes correct percentiles for larger datasets', function () { + // Create 100 markers with durations 1-100ms + const markers = Array.from({ length: 100 }, (_, i) => + makeMarker(i * 10, i * 10 + i + 1) + ); + + const stats = computeDurationStats(markers); + expect(stats).toBeDefined(); + expect(stats!.min).toBe(1); + 
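+      // The expectations below assume a simple nearest-rank style index into
+      // the sorted durations (a sketch of the assumed convention, not
+      // necessarily the exact implementation):
+      //   const pick = (sorted: number[], p: number) =>
+      //     sorted[Math.min(sorted.length - 1, Math.floor(sorted.length * p))];
+      // For durations 1..100 ms this gives median 51, p95 96 and p99 100,
+      // matching the checks that follow.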
expect(stats!.max).toBe(100); + // Median: floor(100/2) = 50th index (0-based) = value 51 + expect(stats!.median).toBe(51); + // p95 = floor(100 * 0.95) = 95th index (0-based) = value 96 + expect(stats!.p95).toBe(96); + // p99 = floor(100 * 0.99) = 99th index (0-based) = value 100 + expect(stats!.p99).toBe(100); + }); + }); + + describe('computeRateStats', function () { + function makeMarker(start: number, end: number | null): Marker { + return { + start, + end, + name: 'TestMarker', + category: 0, + data: null, + threadId: null, + }; + } + + it('handles empty marker list', function () { + const stats = computeRateStats([]); + expect(stats.markersPerSecond).toBe(0); + expect(stats.minGap).toBe(0); + expect(stats.avgGap).toBe(0); + expect(stats.maxGap).toBe(0); + }); + + it('handles single marker', function () { + const stats = computeRateStats([makeMarker(5, 10)]); + expect(stats.markersPerSecond).toBe(0); + expect(stats.minGap).toBe(0); + expect(stats.avgGap).toBe(0); + expect(stats.maxGap).toBe(0); + }); + + it('computes rate for evenly spaced markers', function () { + // Markers at 0, 100, 200, 300, 400 (100ms gaps) + const markers = [ + makeMarker(0, null), + makeMarker(100, null), + makeMarker(200, null), + makeMarker(300, null), + makeMarker(400, null), + ]; + + const stats = computeRateStats(markers); + // Time range: 400 - 0 = 400ms = 0.4s + // 5 markers in 0.4s = 12.5 markers/sec + expect(stats.markersPerSecond).toBeCloseTo(12.5, 5); + expect(stats.minGap).toBe(100); + expect(stats.avgGap).toBe(100); + expect(stats.maxGap).toBe(100); + }); + + it('computes rate for unevenly spaced markers', function () { + const markers = [ + makeMarker(0, null), + makeMarker(10, null), // 10ms gap + makeMarker(15, null), // 5ms gap + makeMarker(100, null), // 85ms gap + ]; + + const stats = computeRateStats(markers); + // Time range: 100 - 0 = 100ms = 0.1s + // 4 markers in 0.1s = 40 markers/sec + expect(stats.markersPerSecond).toBeCloseTo(40, 5); + expect(stats.minGap).toBe(5); + expect(stats.avgGap).toBeCloseTo((10 + 5 + 85) / 3, 5); + expect(stats.maxGap).toBe(85); + }); + + it('sorts markers by start time before computing gaps', function () { + // Provide markers out of order + const markers = [ + makeMarker(100, null), + makeMarker(0, null), + makeMarker(50, null), + ]; + + const stats = computeRateStats(markers); + // After sorting: 0, 50, 100 + // Gaps: 50, 50 + expect(stats.minGap).toBe(50); + expect(stats.avgGap).toBe(50); + expect(stats.maxGap).toBe(50); + }); + + it('handles markers at same timestamp', function () { + const markers = [ + makeMarker(100, null), + makeMarker(100, null), // Same timestamp + makeMarker(200, null), + ]; + + const stats = computeRateStats(markers); + // Gaps: 0, 100 + expect(stats.minGap).toBe(0); + expect(stats.avgGap).toBe(50); + expect(stats.maxGap).toBe(100); + }); + }); +}); diff --git a/src/test/unit/profile-query/call-tree.test.ts b/src/test/unit/profile-query/call-tree.test.ts new file mode 100644 index 0000000000..e62d5187e3 --- /dev/null +++ b/src/test/unit/profile-query/call-tree.test.ts @@ -0,0 +1,589 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +import { FunctionMap } from '../../../profile-query/function-map'; +import { collectCallTree } from '../../../profile-query/formatters/call-tree'; +import type { CallTreeNode } from '../../../profile-query/types'; +import { getProfileFromTextSamples } from '../../fixtures/profiles/processed-profile'; +import { storeWithProfile } from '../../fixtures/stores'; +import { getThreadSelectors } from 'firefox-profiler/selectors/per-thread'; + +describe('call-tree collection', function () { + describe('simple linear tree', function () { + it('respects node budget', function () { + const { profile } = getProfileFromTextSamples(` + A + B + C + D + E + `); + + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const callTree = threadSelectors.getCallTree(state); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + // Collect with budget of 3 nodes + const result = collectCallTree( + callTree, + functionMap, + threadIndexes, + libs, + { + maxNodes: 3, + } + ); + + // Count nodes (excluding virtual root) + const nodeCount = countNodes(result) - 1; + expect(nodeCount).toBeLessThanOrEqual(3); + }); + + it('includes high-score nodes even when deep', function () { + const { profile } = getProfileFromTextSamples(` + A A A + B B B + C C C + D D D + `); + + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const callTree = threadSelectors.getCallTree(state); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + // With small budget, should still include D (100% at depth 3) + const result = collectCallTree( + callTree, + functionMap, + threadIndexes, + libs, + { + maxNodes: 4, + } + ); + + // Should include: A, B, C, D + const nodeNames = collectNodeNames(result); + expect(nodeNames).toContain('A'); + expect(nodeNames).toContain('B'); + expect(nodeNames).toContain('C'); + expect(nodeNames).toContain('D'); + }); + }); + + describe('branching tree', function () { + it('explores hot paths first', function () { + const { profile } = getProfileFromTextSamples(` + A A A A + B B C C + D D + `); + + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const callTree = threadSelectors.getCallTree(state); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + // With budget of 4: should get A, B (50%), D (50%), C (50%) + const result = collectCallTree( + callTree, + functionMap, + threadIndexes, + libs, + { + maxNodes: 4, + } + ); + + const nodeNames = collectNodeNames(result); + expect(nodeNames).toContain('A'); + expect(nodeNames).toContain('B'); // Hot child (50%) + expect(nodeNames).toContain('C'); // Also 50% + // D might or might not be included depending on score ordering + }); + + it('computes elided children stats', function () { + const { profile } = getProfileFromTextSamples(` + A A A A A + B B C D E + `); + + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const callTree = threadSelectors.getCallTree(state); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + // With budget of 2: A and B, should show C/D/E as elided + const result = collectCallTree( + callTree, + functionMap, 
+ threadIndexes, + libs, + { + maxNodes: 2, + } + ); + + const aNode = result.children[0]; + expect(aNode.name).toBe('A'); + expect(aNode.childrenTruncated).toBeDefined(); + expect(aNode.childrenTruncated?.count).toBeGreaterThan(0); + }); + }); + + describe('scoring strategies', function () { + it('exponential-0.9 balances depth and breadth', function () { + const { profile } = getProfileFromTextSamples(` + A A B + C C + D D + `); + + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const callTree = threadSelectors.getCallTree(state); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + const result = collectCallTree( + callTree, + functionMap, + threadIndexes, + libs, + { + maxNodes: 4, + scoringStrategy: 'exponential-0.9', + } + ); + + const nodeNames = collectNodeNames(result); + expect(nodeNames).toContain('A'); // 66% at depth 0 + expect(nodeNames).toContain('B'); // 33% at depth 0 + }); + + it('percentage-only ignores depth', function () { + const { profile } = getProfileFromTextSamples(` + A + B + C + D + `); + + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const callTree = threadSelectors.getCallTree(state); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + const result = collectCallTree( + callTree, + functionMap, + threadIndexes, + libs, + { + maxNodes: 4, + scoringStrategy: 'percentage-only', + } + ); + + // All nodes should have same priority (100%), so all included + const nodeCount = countNodes(result) - 1; + expect(nodeCount).toBe(4); + }); + }); + + describe('complex branching trees', function () { + it('handles multiple levels of branching correctly', function () { + const { profile } = getProfileFromTextSamples(` + A A A A A A B B C + D D E E F F G G + H H I I + J J + `); + + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const callTree = threadSelectors.getCallTree(state); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + const result = collectCallTree( + callTree, + functionMap, + threadIndexes, + libs, + { + maxNodes: 10, + scoringStrategy: 'exponential-0.9', + } + ); + + const nodeNames = collectNodeNames(result); + // Should include high-percentage nodes + expect(nodeNames).toContain('A'); // 66% at depth 0 + expect(nodeNames).toContain('B'); // 22% at depth 0 + expect(nodeNames).toContain('D'); // 22% under A + expect(nodeNames).toContain('E'); // 22% under A + + const nodeCount = countNodes(result) - 1; + expect(nodeCount).toBeLessThanOrEqual(10); + }); + + it('correctly computes elided children percentages', function () { + const { profile } = getProfileFromTextSamples(` + A A A A A A A A A A + B B C C D D E F G H + `); + + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const callTree = threadSelectors.getCallTree(state); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + // Small budget to force truncation + const result = collectCallTree( + callTree, + functionMap, + threadIndexes, + libs, + { + maxNodes: 3, + } + ); + + const aNode = result.children[0]; + expect(aNode.name).toBe('A'); + 
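+      // With maxNodes: 3, at most A plus two of its seven children (B, C and
+      // D have 2 samples each, E-H have 1 each) can be kept, so the remaining
+      // children have to be summarized in childrenTruncated below.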
expect(aNode.childrenTruncated).toBeDefined(); + + // Verify the count and percentages are correct + const truncInfo = aNode.childrenTruncated!; + expect(truncInfo.count).toBeGreaterThan(0); + expect(truncInfo.combinedPercentage).toBeGreaterThan(0); + expect(truncInfo.maxPercentage).toBeGreaterThan(0); + // Max percentage should be <= combined percentage + expect(truncInfo.maxPercentage).toBeLessThanOrEqual( + truncInfo.combinedPercentage + ); + }); + + it('handles wide trees with many children', function () { + // Create a wide tree: A has 15 children + const samples = ` + A A A A A A A A A A A A A A A A + B C D E F G H I J K L M N O P Q + `; + + const { profile } = getProfileFromTextSamples(samples); + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const callTree = threadSelectors.getCallTree(state); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + // First verify that A has many children + const roots = callTree.getRoots(); + expect(roots.length).toBe(1); + const aCallNode = roots[0]; + const aChildren = callTree.getChildren(aCallNode); + expect(aChildren.length).toBe(16); // B through Q + + const result = collectCallTree( + callTree, + functionMap, + threadIndexes, + libs, + { + maxNodes: 5, // Small budget to ensure truncation + maxChildrenPerNode: 10, + } + ); + + const aNode = result.children[0]; + expect(aNode.name).toBe('A'); + + // A has 16 children, but we can only expand 10 (maxChildrenPerNode) + // With budget of 5 total nodes (A + 4 children), we should have truncation + // Either from the 10 expanded children (6 not included) + 6 not expanded = 12 total + // Or if fewer than 4 children included, even more truncated + expect(aNode.childrenTruncated).toBeDefined(); + expect(aNode.childrenTruncated!.count).toBeGreaterThan(0); + }); + + it('preserves correct ordering by sample count', function () { + const { profile } = getProfileFromTextSamples(` + A A A A A A A A + B B B C C D + `); + + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const callTree = threadSelectors.getCallTree(state); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + const result = collectCallTree( + callTree, + functionMap, + threadIndexes, + libs, + { + maxNodes: 10, + } + ); + + const aNode = result.children[0]; + expect(aNode.name).toBe('A'); + + // Children should be ordered B (3 samples), C (2 samples), D (1 sample) + expect(aNode.children.length).toBeGreaterThanOrEqual(2); + expect(aNode.children[0].name).toBe('B'); // Highest sample count + expect(aNode.children[1].name).toBe('C'); + }); + }); + + describe('deep nested structures', function () { + it('includes deep hot paths over shallow cold paths', function () { + const { profile } = getProfileFromTextSamples(` + A A A A A A A A B C + D D D D D D D D + E E E E E E E E + F F F F F F F F + G G G G G G G G + `); + + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const callTree = threadSelectors.getCallTree(state); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + const result = collectCallTree( + callTree, + functionMap, + threadIndexes, + libs, + { + maxNodes: 8, + scoringStrategy: 'exponential-0.9', + } + ); + + const nodeNames = 
collectNodeNames(result); + // Should include deep path A->D->E->F->G even though it's deep + // because it's 80% of all samples + expect(nodeNames).toContain('A'); + expect(nodeNames).toContain('D'); + expect(nodeNames).toContain('E'); + expect(nodeNames).toContain('F'); + expect(nodeNames).toContain('G'); + }); + + it('respects maxDepth parameter', function () { + // Create deeply nested tree + const samples = Array(50) + .fill(null) + .map((_, i) => `Func${i}`) + .join('\n'); + + const { profile } = getProfileFromTextSamples(samples); + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const callTree = threadSelectors.getCallTree(state); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + const result = collectCallTree( + callTree, + functionMap, + threadIndexes, + libs, + { + maxNodes: 100, + maxDepth: 20, + } + ); + + const maxDepth = findMaxDepth(result); + expect(maxDepth).toBeLessThanOrEqual(20); + }); + }); + + describe('elided children statistics', function () { + it('correctly sums elided children samples', function () { + const { profile } = getProfileFromTextSamples(` + A A A A A A A A A A + B B B C C D E F G H + `); + + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const callTree = threadSelectors.getCallTree(state); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + // Budget that includes A and B, but not the other children + const result = collectCallTree( + callTree, + functionMap, + threadIndexes, + libs, + { + maxNodes: 2, + } + ); + + const aNode = result.children[0]; + expect(aNode.name).toBe('A'); + expect(aNode.children.length).toBe(1); + expect(aNode.children[0].name).toBe('B'); + + // Should have truncated info for C, D, E, F, G, H + expect(aNode.childrenTruncated).toBeDefined(); + expect(aNode.childrenTruncated!.count).toBe(6); + + // Combined samples should be 7 (C:2, D:1, E:1, F:1, G:1, H:1) + expect(aNode.childrenTruncated!.combinedSamples).toBe(7); + // Combined percentage should be 70% of total 10 samples (not relative to A) + expect(aNode.childrenTruncated!.combinedPercentage).toBeCloseTo(70, 0); + + // Max samples should be 2 (from C) + expect(aNode.childrenTruncated!.maxSamples).toBe(2); + // Max percentage should be 20% of total 10 samples (not relative to A) + expect(aNode.childrenTruncated!.maxPercentage).toBeCloseTo(20, 0); + }); + + it('correctly identifies depth where children were truncated', function () { + const { profile } = getProfileFromTextSamples(` + A A A A + B B B B + C D E F + `); + + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const callTree = threadSelectors.getCallTree(state); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + const result = collectCallTree( + callTree, + functionMap, + threadIndexes, + libs, + { + maxNodes: 2, + } + ); + + const aNode = result.children[0]; + const bNode = aNode.children[0]; + expect(bNode.name).toBe('B'); + + // B's children were truncated at depth 2 + expect(bNode.childrenTruncated).toBeDefined(); + expect(bNode.childrenTruncated!.depth).toBe(2); + }); + }); + + describe('depth limit', function () { + it('stops expanding beyond maxDepth', function () { + // Very deep tree + const samples 
= Array(100) + .fill(null) + .map((_, i) => `Func${i}`) + .join('\n'); + + const { profile } = getProfileFromTextSamples(samples); + + const store = storeWithProfile(profile); + const state = store.getState(); + const threadSelectors = getThreadSelectors(0); + const callTree = threadSelectors.getCallTree(state); + const functionMap = new FunctionMap(); + const threadIndexes = new Set([0]); + const libs = profile.libs; + + const result = collectCallTree( + callTree, + functionMap, + threadIndexes, + libs, + { + maxNodes: 1000, // High budget + maxDepth: 10, // But limited depth + } + ); + + const maxDepthFound = findMaxDepth(result); + expect(maxDepthFound).toBeLessThanOrEqual(10); + }); + }); +}); + +/** + * Count total nodes in tree (including root). + */ +function countNodes(node: CallTreeNode): number { + let count = 1; + for (const child of node.children) { + count += countNodes(child); + } + return count; +} + +/** + * Collect all node names in tree. + */ +function collectNodeNames(node: CallTreeNode): string[] { + const names = [node.name]; + for (const child of node.children) { + names.push(...collectNodeNames(child)); + } + return names; +} + +/** + * Find maximum depth in tree. + */ +function findMaxDepth(node: CallTreeNode): number { + if (node.children.length === 0) { + return node.originalDepth; + } + return Math.max(...node.children.map((child) => findMaxDepth(child))); +} diff --git a/src/test/unit/profile-query/function-list.test.ts b/src/test/unit/profile-query/function-list.test.ts new file mode 100644 index 0000000000..9f3974ffe5 --- /dev/null +++ b/src/test/unit/profile-query/function-list.test.ts @@ -0,0 +1,608 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +import { FunctionMap } from 'firefox-profiler/profile-query/function-map'; +import { + extractFunctionData, + sortByTotal, + sortBySelf, + formatFunctionList, + createTopFunctionLists, + truncateFunctionName, + type FunctionData, +} from '../../../profile-query/function-list'; +import { getProfileFromTextSamples } from '../../fixtures/profiles/processed-profile'; +import type { Lib } from 'firefox-profiler/types'; + +function createMockTree(functions: FunctionData[]) { + return { + getRoots: () => functions.map((_, i) => i), + getNodeData: (index: number) => functions[index], + }; +} + +describe('function-list', function () { + describe('extractFunctionData', function () { + it('extracts function data from a tree', function () { + const { profile, derivedThreads } = getProfileFromTextSamples(` + foo + bar + `); + const [thread] = derivedThreads; + const libs: Lib[] = profile.libs; + + const functions: FunctionData[] = [ + { + funcName: 'foo', + funcIndex: 0, + total: 100, + self: 50, + totalRelative: 0.5, + selfRelative: 0.25, + }, + { + funcName: 'bar', + funcIndex: 1, + total: 80, + self: 60, + totalRelative: 0.4, + selfRelative: 0.3, + }, + ]; + + const tree = createMockTree(functions); + const result = extractFunctionData(tree, thread, libs); + + expect(result).toEqual(functions); + }); + }); + + describe('sortByTotal', function () { + it('sorts functions by total time descending', function () { + const functions: FunctionData[] = [ + { + funcName: 'foo', + funcIndex: 0, + total: 50, + self: 30, + totalRelative: 0.25, + selfRelative: 0.15, + }, + { + funcName: 'bar', + funcIndex: 0, + total: 100, + self: 20, + totalRelative: 0.5, + selfRelative: 0.1, + }, + { + funcName: 'baz', + funcIndex: 0, + total: 75, + self: 40, + totalRelative: 0.375, + selfRelative: 0.2, + }, + ]; + + const sorted = sortByTotal(functions); + + expect(sorted.map((f) => f.funcName)).toEqual(['bar', 'baz', 'foo']); + expect(sorted.map((f) => f.total)).toEqual([100, 75, 50]); + }); + + it('does not mutate the original array', function () { + const functions: FunctionData[] = [ + { + funcName: 'foo', + funcIndex: 0, + total: 50, + self: 30, + totalRelative: 0.25, + selfRelative: 0.15, + }, + { + funcName: 'bar', + funcIndex: 0, + total: 100, + self: 20, + totalRelative: 0.5, + selfRelative: 0.1, + }, + ]; + + const original = [...functions]; + sortByTotal(functions); + + expect(functions).toEqual(original); + }); + }); + + describe('sortBySelf', function () { + it('sorts functions by self time descending', function () { + const functions: FunctionData[] = [ + { + funcName: 'foo', + funcIndex: 0, + total: 100, + self: 30, + totalRelative: 0.5, + selfRelative: 0.15, + }, + { + funcName: 'bar', + funcIndex: 0, + total: 50, + self: 40, + totalRelative: 0.25, + selfRelative: 0.2, + }, + { + funcName: 'baz', + funcIndex: 0, + total: 75, + self: 20, + totalRelative: 0.375, + selfRelative: 0.1, + }, + ]; + + const sorted = sortBySelf(functions); + + expect(sorted.map((f) => f.funcName)).toEqual(['bar', 'foo', 'baz']); + expect(sorted.map((f) => f.self)).toEqual([40, 30, 20]); + }); + }); + + describe('formatFunctionList', function () { + it('formats a complete list with no omissions', function () { + const functions: FunctionData[] = [ + { + funcName: 'foo', + funcIndex: 0, + total: 100, + self: 50, + totalRelative: 0.5, + selfRelative: 0.25, + }, + { + funcName: 'bar', + funcIndex: 0, + total: 80, + self: 40, + totalRelative: 0.4, + selfRelative: 0.2, + }, + ]; + + const result = formatFunctionList( + 'Top 
Functions', + functions, + 10, + 'total', + new Set([0]), + new FunctionMap() + ); + + expect(result.title).toBe('Top Functions'); + expect(result.stats).toBeNull(); + expect(result.lines.length).toBe(2); + expect(result.lines[0]).toContain('foo'); + expect(result.lines[0]).toContain('total: 100'); + expect(result.lines[1]).toContain('bar'); + }); + + it('formats a list with omissions and shows stats', function () { + const functions: FunctionData[] = [ + { + funcName: 'func1', + funcIndex: 0, + total: 100, + self: 50, + totalRelative: 0.333, + selfRelative: 0.25, + }, + { + funcName: 'func2', + funcIndex: 0, + total: 90, + self: 40, + totalRelative: 0.3, + selfRelative: 0.2, + }, + { + funcName: 'func3', + funcIndex: 0, + total: 80, + self: 30, + totalRelative: 0.267, + selfRelative: 0.15, + }, + { + funcName: 'func4', + funcIndex: 0, + total: 70, + self: 20, + totalRelative: 0.233, + selfRelative: 0.1, + }, + { + funcName: 'func5', + funcIndex: 0, + total: 60, + self: 10, + totalRelative: 0.2, + selfRelative: 0.05, + }, + ]; + + const result = formatFunctionList( + 'Top Functions', + functions, + 3, + 'self', + new Set([0]), + new FunctionMap() + ); + + expect(result.title).toBe('Top Functions'); + expect(result.lines.length).toBe(5); // 3 functions + blank line + stats line + expect(result.stats).toEqual({ + omittedCount: 2, + maxTotal: 70, + maxSelf: 20, + sumSelf: 30, // 20 + 10 + }); + expect(result.lines[3]).toBe(''); + expect(result.lines[4]).toContain('2 more functions omitted'); + expect(result.lines[4]).toContain('max total: 70'); + expect(result.lines[4]).toContain('max self: 20'); + expect(result.lines[4]).toContain('sum of self: 30'); + }); + + it('formats entries with total first when sortKey is total', function () { + const functions: FunctionData[] = [ + { + funcName: 'foo', + funcIndex: 0, + total: 100, + self: 50, + totalRelative: 0.5, + selfRelative: 0.25, + }, + ]; + + const result = formatFunctionList( + 'Top Functions', + functions, + 10, + 'total', + new Set([0]), + new FunctionMap() + ); + + expect(result.lines[0]).toMatch(/total:.*self:/); + expect(result.lines[0]).toContain('total: 100 (50.0%)'); + expect(result.lines[0]).toContain('self: 50 (25.0%)'); + }); + + it('formats entries with self first when sortKey is self', function () { + const functions: FunctionData[] = [ + { + funcName: 'foo', + funcIndex: 0, + total: 100, + self: 50, + totalRelative: 0.5, + selfRelative: 0.25, + }, + ]; + + const result = formatFunctionList( + 'Top Functions', + functions, + 10, + 'self', + new Set([0]), + new FunctionMap() + ); + + expect(result.lines[0]).toMatch(/self:.*total:/); + expect(result.lines[0]).toContain('self: 50 (25.0%)'); + expect(result.lines[0]).toContain('total: 100 (50.0%)'); + }); + }); + + describe('createTopFunctionLists', function () { + it('creates two lists sorted by total and self', function () { + const functions: FunctionData[] = [ + { + funcName: 'highTotal', + funcIndex: 0, + total: 100, + self: 20, + totalRelative: 0.5, + selfRelative: 0.1, + }, + { + funcName: 'highSelf', + funcIndex: 0, + total: 50, + self: 40, + totalRelative: 0.25, + selfRelative: 0.2, + }, + { + funcName: 'mid', + funcIndex: 0, + total: 75, + self: 30, + totalRelative: 0.375, + selfRelative: 0.15, + }, + ]; + + const result = createTopFunctionLists( + functions, + 10, + new Set([0]), + new FunctionMap() + ); + + expect(result.byTotal.title).toBe('Top Functions (by total time)'); + expect(result.bySelf.title).toBe('Top Functions (by self time)'); + + // Check byTotal is sorted 
by total + expect(result.byTotal.lines[0]).toContain('highTotal'); + expect(result.byTotal.lines[1]).toContain('mid'); + expect(result.byTotal.lines[2]).toContain('highSelf'); + + // Check bySelf is sorted by self + expect(result.bySelf.lines[0]).toContain('highSelf'); + expect(result.bySelf.lines[1]).toContain('mid'); + expect(result.bySelf.lines[2]).toContain('highTotal'); + }); + + it('respects the limit and shows stats for omitted functions', function () { + const functions: FunctionData[] = [ + { + funcName: 'func1', + funcIndex: 0, + total: 100, + self: 50, + totalRelative: 0.4, + selfRelative: 0.2, + }, + { + funcName: 'func2', + funcIndex: 0, + total: 90, + self: 40, + totalRelative: 0.36, + selfRelative: 0.16, + }, + { + funcName: 'func3', + funcIndex: 0, + total: 80, + self: 30, + totalRelative: 0.32, + selfRelative: 0.12, + }, + ]; + + const result = createTopFunctionLists( + functions, + 2, + new Set([0]), + new FunctionMap() + ); + + // Each list should have 2 functions + blank + stats = 4 lines + expect(result.byTotal.lines.length).toBe(4); + expect(result.bySelf.lines.length).toBe(4); + + expect(result.byTotal.stats?.omittedCount).toBe(1); + expect(result.bySelf.stats?.omittedCount).toBe(1); + }); + }); + + describe('truncateFunctionName', function () { + it('returns names unchanged when they fit within the limit', function () { + expect(truncateFunctionName('RtlUserThreadStart', 120)).toBe( + 'RtlUserThreadStart' + ); + expect(truncateFunctionName('foo::bar::baz()', 120)).toBe( + 'foo::bar::baz()' + ); + expect( + truncateFunctionName('std::vector::push_back(int const&)', 120) + ).toBe('std::vector::push_back(int const&)'); + }); + + it('truncates simple C++ namespaced functions', function () { + const name = + 'some::very::long::namespace::hierarchy::with::many::levels::FunctionName()'; + const result = truncateFunctionName(name, 50); + + // Should preserve the function name at the end + expect(result).toContain('FunctionName()'); + // Should show some context at the beginning + expect(result).toContain('some::'); + expect(result.length).toBeLessThanOrEqual(50); + }); + + it('truncates complex template parameters intelligently', function () { + const name = + 'std::_Hash,std::equal_to>,std::allocator>,0>>::~_Hash()'; + const result = truncateFunctionName(name, 120); + + // Should preserve namespace prefix and function name + expect(result).toContain('std::_Hash<'); + expect(result).toContain('~_Hash()'); + // Should have collapsed some template parameters + expect(result.length).toBeLessThanOrEqual(120); + }); + + it('truncates function parameters while preserving function name', function () { + const name = + 'mozilla::wr::RenderThread::UpdateAndRender(mozilla::wr::WrWindowId, mozilla::layers::BaseTransactionId)'; + const result = truncateFunctionName(name, 120); + + // Function name should always be preserved + expect(result).toContain('UpdateAndRender('); + expect(result).toContain(')'); + // Should preserve context + expect(result).toContain('mozilla::wr::RenderThread::'); + expect(result.length).toBeLessThanOrEqual(120); + }); + + it('handles library prefixes correctly', function () { + const name = + 'nvoglv64.dll!mozilla::wr::RenderThread::UpdateAndRender(mozilla::wr::WrWindowId)'; + const result = truncateFunctionName(name, 120); + + // Library prefix should be preserved + expect(result).toStartWith('nvoglv64.dll!'); + // Function should still be visible + expect(result).toContain('UpdateAndRender'); + expect(result.length).toBeLessThanOrEqual(120); + }); + + 
it('handles very long library prefixes gracefully', function () { + const name = + 'a-very-long-library-name-that-is-too-long.dll!FunctionName()'; + const result = truncateFunctionName(name, 30); + + // Should fall back to simple truncation + expect(result.length).toBeLessThanOrEqual(30); + expect(result).toContain('...'); + }); + + it('truncates nested templates by collapsing inner content', function () { + const name = + 'mozilla::interceptor::FuncHook>::operator()'; + const result = truncateFunctionName(name, 120); + + // Should show outer template structure + expect(result).toContain('FuncHook<'); + expect(result).toContain('operator()'); + // Inner templates should be collapsed + expect(result.length).toBeLessThanOrEqual(120); + }); + + it('handles functions with no namespaces', function () { + const name = 'malloc'; + expect(truncateFunctionName(name, 120)).toBe('malloc'); + + const name2 = 'RtlUserThreadStart'; + expect(truncateFunctionName(name2, 120)).toBe('RtlUserThreadStart'); + }); + + it('handles empty parameters', function () { + expect(truncateFunctionName('foo::bar()', 120)).toBe('foo::bar()'); + expect(truncateFunctionName('SomeClass::Method()', 120)).toBe( + 'SomeClass::Method()' + ); + }); + + it('breaks at namespace boundaries when truncating prefix', function () { + const name = + 'namespace1::namespace2::namespace3::namespace4::namespace5::FunctionName()'; + const result = truncateFunctionName(name, 50); + + // Should break at :: boundaries, not mid-word + expect(result).not.toMatch(/[a-z]::[A-Z]/); // No broken words + expect(result).toContain('FunctionName()'); + expect(result.length).toBeLessThanOrEqual(50); + }); + + it('preserves closing parenthesis for functions with parameters', function () { + const name = 'SomeClass::Method(int, std::string, std::vector)'; + const result = truncateFunctionName(name, 40); + + // Should always have matching parentheses + expect(result).toContain('Method('); + expect(result).toContain(')'); + expect(result.length).toBeLessThanOrEqual(40); + }); + + it('handles deeply nested templates', function () { + const name = + 'std::vector>>>'; + const result = truncateFunctionName(name, 50); + + // Should show outer structure + expect(result).toContain('std::vector<'); + expect(result).toContain('>'); + // Should have collapsed inner content + expect(result.length).toBeLessThanOrEqual(50); + }); + + it('allocates more space to suffix (function name) when possible', function () { + const name = + 'short::VeryLongFunctionNameThatShouldBePreservedBecauseItIsImportant(parameter1, parameter2, parameter3)'; + const result = truncateFunctionName(name, 100); + + // Function name should be prioritized over prefix + expect(result).toContain('VeryLongFunctionName'); + expect(result.length).toBeLessThanOrEqual(100); + }); + + it('handles mixed templates and parameters', function () { + const name = + 'std::map::insert(std::pair const&)'; + const result = truncateFunctionName(name, 60); + + expect(result).toContain('insert('); + expect(result).toContain(')'); + expect(result.length).toBeLessThanOrEqual(60); + }); + + it('returns consistent results for the same input', function () { + const name = + 'mozilla::wr::RenderThread::UpdateAndRender(mozilla::wr::WrWindowId)'; + const result1 = truncateFunctionName(name, 100); + const result2 = truncateFunctionName(name, 100); + + expect(result1).toBe(result2); + }); + + it('handles edge case of very small maxLength', function () { + const name = 'SomeClass::SomeMethod()'; + const result = 
truncateFunctionName(name, 15); + + // Should still produce something reasonable and prioritize the function name + expect(result.length).toBeLessThanOrEqual(15); + expect(result.length).toBeGreaterThan(0); + // When space is very limited, it may drop the namespace to show the function name + expect(result).toContain('SomeMethod'); + }); + + it('handles names with only templates and no function name', function () { + const name = 'std::vector'; + const result = truncateFunctionName(name, 50); + + expect(result).toContain('std::vector<'); + expect(result.length).toBeLessThanOrEqual(50); + }); + + it('truncates while preserving critical structure markers', function () { + const name = 'foo::bar::qux(param1, param2, param3, param4)'; + const result = truncateFunctionName(name, 35); + + // Should maintain bracket pairing + const openAngles = (result.match(//g) || []).length; + const openParens = (result.match(/\(/g) || []).length; + const closeParens = (result.match(/\)/g) || []).length; + + // All opened brackets should be closed + expect(openAngles).toBe(closeAngles); + expect(openParens).toBe(closeParens); + expect(result.length).toBeLessThanOrEqual(35); + }); + }); +}); diff --git a/src/test/unit/profile-query/process-thread-list.test.ts b/src/test/unit/profile-query/process-thread-list.test.ts new file mode 100644 index 0000000000..f394e4fe7d --- /dev/null +++ b/src/test/unit/profile-query/process-thread-list.test.ts @@ -0,0 +1,379 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { buildProcessThreadList } from 'firefox-profiler/profile-query/process-thread-list'; + +import type { ThreadInfo } from 'firefox-profiler/profile-query/process-thread-list'; + +describe('buildProcessThreadList', function () { + function createThread( + threadIndex: number, + pid: string, + name: string, + cpuMs: number + ): ThreadInfo { + return { threadIndex, pid, name, cpuMs }; + } + + it('shows top 5 processes by CPU, plus any needed for top 20 threads', function () { + // All 7 threads are in top 20, so all 7 processes should be shown + const threads: ThreadInfo[] = [ + createThread(0, 'p1', 'Thread1', 100), + createThread(1, 'p2', 'Thread2', 80), + createThread(2, 'p3', 'Thread3', 60), + createThread(3, 'p4', 'Thread4', 40), + createThread(4, 'p5', 'Thread5', 20), + createThread(5, 'p6', 'Thread6', 10), + createThread(6, 'p7', 'Thread7', 5), + ]; + + const processIndexMap = new Map([ + ['p1', 0], + ['p2', 1], + ['p3', 2], + ['p4', 3], + ['p5', 4], + ['p6', 5], + ['p7', 6], + ]); + + const result = buildProcessThreadList(threads, processIndexMap); + + // All 7 threads are in top 20, so all 7 processes are shown + expect(result.processes.length).toBe(7); + expect(result.processes.map((p) => p.pid)).toEqual([ + 'p1', + 'p2', + 'p3', + 'p4', + 'p5', + 'p6', + 'p7', + ]); + }); + + it('includes processes with threads in top 20, even if not in top 5 processes', function () { + // Process p1 has high CPU from one thread + // Process p2 has low CPU total but has a thread in the top 20 + const threads: ThreadInfo[] = [ + createThread(0, 'p1', 'Thread1', 100), + createThread(1, 'p1', 'Thread2', 1), + createThread(2, 'p1', 'Thread3', 1), + createThread(3, 'p2', 'HighCPU', 50), // This thread is in top 20 + createThread(4, 'p2', 'LowCPU', 0.5), + createThread(5, 'p3', 'Thread6', 80), + createThread(6, 'p4', 'Thread7', 70), + createThread(7, 'p5', 
'Thread8', 60), + createThread(8, 'p6', 'Thread9', 55), + ]; + + const processIndexMap = new Map([ + ['p1', 0], + ['p2', 1], + ['p3', 2], + ['p4', 3], + ['p5', 4], + ['p6', 5], + ]); + + const result = buildProcessThreadList(threads, processIndexMap); + + // Should include p2 even though it's not in top 5 by total CPU + // because it has a thread (t3) in the top 20 + expect(result.processes.map((p) => p.pid)).toContain('p2'); + }); + + it('shows up to 5 threads per process when none are in top 20', function () { + // Create 4 high-CPU processes that will be in top 5 + const threads: ThreadInfo[] = []; + threads.push(createThread(0, 'p-high-0', 'High1', 10000)); + threads.push(createThread(1, 'p-high-1', 'High2', 9000)); + threads.push(createThread(2, 'p-high-2', 'High3', 8000)); + threads.push(createThread(3, 'p-high-3', 'High4', 7000)); + + // p1 will be 5th by total CPU (with many threads but none in top 20) + threads.push(createThread(10, 'p1', 'Thread1', 600)); + threads.push(createThread(11, 'p1', 'Thread2', 500)); + threads.push(createThread(12, 'p1', 'Thread3', 400)); + threads.push(createThread(13, 'p1', 'Thread4', 300)); + threads.push(createThread(14, 'p1', 'Thread5', 200)); + threads.push(createThread(15, 'p1', 'Thread6', 100)); + threads.push(createThread(16, 'p1', 'Thread7', 50)); + // p1 total: 2150ms, should be 5th place + + // Add threads that will fill positions 5-20 in top 20, pushing out p1's threads + threads.push(createThread(4, 'p2', 'Med1', 6000)); + threads.push(createThread(5, 'p2', 'Med2', 5000)); + threads.push(createThread(6, 'p3', 'Med3', 4000)); + threads.push(createThread(7, 'p3', 'Med4', 3000)); + threads.push(createThread(8, 'p4', 'Med5', 2000)); + threads.push(createThread(9, 'p4', 'Med6', 1000)); + threads.push(createThread(20, 'p5', 'Med7', 900)); + threads.push(createThread(21, 'p5', 'Med8', 800)); + threads.push(createThread(22, 'p6', 'Med9', 700)); + threads.push(createThread(23, 'p6', 'Med10', 650)); + threads.push(createThread(24, 'p7', 'Med11', 640)); + threads.push(createThread(25, 'p7', 'Med12', 630)); + threads.push(createThread(26, 'p8', 'Med13', 620)); + threads.push(createThread(27, 'p8', 'Med14', 610)); + // Top 20 are now: 10000, 9000, 8000, 7000, 6000, 5000, 4000, 3000, 2000, 1000, 900, 800, 700, 650, 640, 630, 620, 610, 600, 500 + // p1's highest is 600ms (position 19) and 500ms (position 20) + + const processIndexMap = new Map([ + ['p-high-0', 0], + ['p-high-1', 1], + ['p-high-2', 2], + ['p-high-3', 3], + ['p1', 4], + ['p2', 5], + ['p3', 6], + ['p4', 7], + ['p5', 8], + ['p6', 9], + ['p7', 10], + ['p8', 11], + ]); + + const result = buildProcessThreadList(threads, processIndexMap); + + const p1 = result.processes.find((p) => p.pid === 'p1'); + expect(p1).toBeDefined(); + // t10 and t11 from p1 are in top 20, plus we fill up to 5 total + expect(p1!.threads.length).toBe(5); + // Should show the 2 from top 20 plus the next 3 highest + expect(p1!.threads.map((t) => t.threadIndex)).toEqual([10, 11, 12, 13, 14]); + }); + + it('includes summary for remaining threads', function () { + // Create scenario where only some threads from p1 are in top 20 + const threads: ThreadInfo[] = []; + + // Add 15 high-CPU threads from other processes + for (let i = 0; i < 15; i++) { + threads.push( + createThread(i, `p-high-${i}`, `HighCPU${i}`, 1000 - i * 10) + ); + } + + // Add p1 threads - the first 5 will be in top 20 (850ms is above 910ms cutoff) + threads.push(createThread(15, 'p1', 'Thread1', 950)); // In top 20 + threads.push(createThread(16, 'p1', 
'Thread2', 940)); // In top 20 + threads.push(createThread(17, 'p1', 'Thread3', 930)); // In top 20 + threads.push(createThread(18, 'p1', 'Thread4', 920)); // In top 20 + threads.push(createThread(19, 'p1', 'Thread5', 910)); // In top 20 (20th place) + // These are not in top 20 + threads.push(createThread(20, 'p1', 'Thread6', 50)); + threads.push(createThread(21, 'p1', 'Thread7', 40)); + threads.push(createThread(22, 'p1', 'Thread8', 30)); + + const processIndexMap = new Map([['p1', 100]]); + for (let i = 0; i < 15; i++) { + processIndexMap.set(`p-high-${i}`, i); + } + + const result = buildProcessThreadList(threads, processIndexMap); + + const p1 = result.processes.find((p) => p.pid === 'p1'); + expect(p1).toBeDefined(); + + // Should show 5 top-20 threads + expect(p1!.threads.length).toBe(5); + expect(p1!.threads.map((t) => t.threadIndex)).toEqual([15, 16, 17, 18, 19]); + + // Should have remaining threads summary + expect(p1!.remainingThreads).toEqual({ + count: 3, + combinedCpuMs: 120, // 50 + 40 + 30 + maxCpuMs: 50, + }); + }); + + it('shows ALL top-20 threads from a process, even if more than 5', function () { + // This is the critical test case for the bug fix: + // If a process has 7 threads in the top 20, all 7 should be shown, + // not just the first 5. + const threads: ThreadInfo[] = [ + // Process p1 has 7 threads in the top 20 + createThread(0, 'p1', 'Thread1', 100), + createThread(1, 'p1', 'Thread2', 95), + createThread(2, 'p1', 'Thread3', 90), + createThread(3, 'p1', 'Thread4', 85), + createThread(4, 'p1', 'Thread5', 80), + createThread(5, 'p1', 'Thread6', 75), + createThread(6, 'p1', 'Thread7', 70), + // These threads from p1 are not in top 20 + createThread(7, 'p1', 'Thread8', 5), + createThread(8, 'p1', 'Thread9', 4), + // Other processes to fill out the top 20 + createThread(9, 'p2', 'Thread10', 65), + createThread(10, 'p2', 'Thread11', 60), + createThread(11, 'p3', 'Thread12', 55), + createThread(12, 'p3', 'Thread13', 50), + createThread(13, 'p4', 'Thread14', 45), + createThread(14, 'p4', 'Thread15', 40), + createThread(15, 'p5', 'Thread16', 35), + createThread(16, 'p5', 'Thread17', 30), + createThread(17, 'p6', 'Thread18', 25), + createThread(18, 'p6', 'Thread19', 20), + createThread(19, 'p7', 'Thread20', 15), + createThread(20, 'p7', 'Thread21', 10), + createThread(21, 'p8', 'Thread22', 9), + createThread(22, 'p8', 'Thread23', 8), + createThread(23, 'p9', 'Thread24', 7), + createThread(24, 'p9', 'Thread25', 6), + // More threads below top 20 - these push out t7 and t8 from p1 + ]; + + const processIndexMap = new Map([ + ['p1', 0], + ['p2', 1], + ['p3', 2], + ['p4', 3], + ['p5', 4], + ['p6', 5], + ['p7', 6], + ['p8', 7], + ['p9', 8], + ]); + + const result = buildProcessThreadList(threads, processIndexMap); + + const p1 = result.processes.find((p) => p.pid === 'p1'); + expect(p1).toBeDefined(); + + // Should show all 7 threads from top 20, not just 5 + expect(p1!.threads.length).toBe(7); + expect(p1!.threads.map((t) => t.threadIndex)).toEqual([ + 0, 1, 2, 3, 4, 5, 6, + ]); + + // Should have remaining threads summary for the 2 threads not in top 20 + expect(p1!.remainingThreads).toEqual({ + count: 2, + combinedCpuMs: 9, // 5 + 4 + maxCpuMs: 5, + }); + }); + + it('sorts threads by CPU within each process', function () { + const threads: ThreadInfo[] = [ + createThread(0, 'p1', 'Low', 10), + createThread(1, 'p1', 'High', 100), + createThread(2, 'p1', 'Medium', 50), + ]; + + const processIndexMap = new Map([['p1', 0]]); + + const result = buildProcessThreadList(threads, 
processIndexMap); + + expect(result.processes[0].threads.map((t) => t.name)).toEqual([ + 'High', + 'Medium', + 'Low', + ]); + }); + + it('handles empty thread list', function () { + const threads: ThreadInfo[] = []; + const processIndexMap = new Map(); + + const result = buildProcessThreadList(threads, processIndexMap); + + expect(result.processes).toEqual([]); + expect(result.remainingProcesses).toBeUndefined(); + }); + + it('handles single thread', function () { + const threads: ThreadInfo[] = [createThread(0, 'p1', 'OnlyThread', 100)]; + + const processIndexMap = new Map([['p1', 0]]); + + const result = buildProcessThreadList(threads, processIndexMap); + + expect(result.processes.length).toBe(1); + expect(result.processes[0].threads.length).toBe(1); + expect(result.processes[0].remainingThreads).toBeUndefined(); + expect(result.remainingProcesses).toBeUndefined(); + }); + + it('correctly aggregates CPU time per process', function () { + const threads: ThreadInfo[] = [ + createThread(0, 'p1', 'Thread1', 100), + createThread(1, 'p1', 'Thread2', 50), + createThread(2, 'p1', 'Thread3', 25), + createThread(3, 'p2', 'Thread4', 200), + ]; + + const processIndexMap = new Map([ + ['p1', 0], + ['p2', 1], + ]); + + const result = buildProcessThreadList(threads, processIndexMap); + + const p1 = result.processes.find((p) => p.pid === 'p1'); + const p2 = result.processes.find((p) => p.pid === 'p2'); + + expect(p1!.cpuMs).toBe(175); // 100 + 50 + 25 + expect(p2!.cpuMs).toBe(200); + }); + + it('includes summary for remaining processes', function () { + // Create a scenario with many processes, where only some are shown + // We need the top 5 processes to be shown, but processes 6-10 should NOT have + // any threads in the top 20 overall + const threads: ThreadInfo[] = []; + + // Add 20 high-CPU threads from top 5 processes + // Each of these processes gets 4 threads in the top 20 + for (let procNum = 0; procNum < 5; procNum++) { + for (let threadNum = 0; threadNum < 4; threadNum++) { + const threadIndex = procNum * 4 + threadNum; + const cpuMs = 1000 - threadIndex * 10; // 1000, 990, 980, ... down to 810 + threads.push( + createThread( + threadIndex, + `p${procNum}`, + `Thread${threadIndex}`, + cpuMs + ) + ); + } + } + + // Add 5 more processes with low CPU (not in top 20) + // These should not be shown + for (let procNum = 5; procNum < 10; procNum++) { + const threadIndex = 20 + procNum - 5; + const cpuMs = 50 - (procNum - 5) * 10; // 50, 40, 30, 20, 10 + threads.push( + createThread(threadIndex, `p${procNum}`, `Thread${threadIndex}`, cpuMs) + ); + } + + const processIndexMap = new Map(); + for (let i = 0; i < 10; i++) { + processIndexMap.set(`p${i}`, i); + } + + const result = buildProcessThreadList(threads, processIndexMap); + + // Should show only top 5 processes (those with threads in top 20) + expect(result.processes.length).toBe(5); + expect(result.processes.map((p) => p.pid)).toEqual([ + 'p0', + 'p1', + 'p2', + 'p3', + 'p4', + ]); + + // Should have remaining processes summary for the last 5 processes + expect(result.remainingProcesses).toEqual({ + count: 5, + combinedCpuMs: 150, // 50 + 40 + 30 + 20 + 10 + maxCpuMs: 50, + }); + }); +}); diff --git a/src/test/unit/profile-query/profile-querier.test.ts b/src/test/unit/profile-query/profile-querier.test.ts new file mode 100644 index 0000000000..cd4c01dd0a --- /dev/null +++ b/src/test/unit/profile-query/profile-querier.test.ts @@ -0,0 +1,210 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/** + * Unit tests for ProfileQuerier class. + * + * NOTE: Currently minimal tests. + * + * The ProfileQuerier class is tested through integration tests in bash scripts + * (bin/pq-test) that load real profiles and verify the output. + * + * Unit tests can be added here for specific utility methods or edge cases that + * are easier to test in isolation. The summarize() method uses the + * buildProcessThreadList function which is thoroughly tested in + * process-thread-list.test.ts. + */ + +import { ProfileQuerier } from 'firefox-profiler/profile-query'; +import { getProfileFromTextSamples } from '../../fixtures/profiles/processed-profile'; +import { getProfileRootRange } from 'firefox-profiler/selectors/profile'; +import { storeWithProfile } from '../../fixtures/stores'; + +describe('ProfileQuerier', function () { + describe('pushViewRange', function () { + it('changes thread samples output to show functions in the selected range', async function () { + // Create a profile with samples at different times that have different call stacks + // Time 0-10ms: call stack has functions A, B, C + // Time 10-20ms: call stack has functions A, B, D + // Time 20-30ms: call stack has functions A, B, E + const { profile } = getProfileFromTextSamples(` + 0 10 20 + A A A + B B B + C D E + `); + + // Set up the store with the profile + const store = storeWithProfile(profile); + const state = store.getState(); + const rootRange = getProfileRootRange(state); + + // Create ProfileQuerier + const querier = new ProfileQuerier(store, rootRange); + + // Get baseline thread samples (should show all functions A, B, C, D, E) + // Don't pass thread handle - use default selected thread + const baselineSamples = await querier.threadSamples(); + const allFunctions = [ + ...baselineSamples.topFunctionsByTotal.map((f) => f.name), + ...baselineSamples.topFunctionsBySelf.map((f) => f.name), + ].join(' '); + expect(allFunctions).toContain('A'); + expect(allFunctions).toContain('B'); + // At least some of C, D, E should appear + const hasC = allFunctions.includes('C'); + const hasD = allFunctions.includes('D'); + const hasE = allFunctions.includes('E'); + expect(hasC || hasD || hasE).toBe(true); + + // Create timestamp names for a narrower range + // The profile has samples at 0ms, 10ms, 20ms + // Select from just after start to just before end to focus on middle sample + const startName = querier._timestampManager.nameForTimestamp( + rootRange.start + 8 + ); + const endName = querier._timestampManager.nameForTimestamp( + rootRange.start + 12 + ); + + // Push a range that includes only the middle sample (at 10ms) + // This should focus on the call stack with D + await querier.pushViewRange(`${startName},${endName}`); + + // Get thread samples again - should now focus on the selected range + const rangedSamples = await querier.threadSamples(); + + // The output should still contain A and B (common to all stacks) + const rangedAllFunctions = [ + ...rangedSamples.topFunctionsByTotal.map((f) => f.name), + ...rangedSamples.topFunctionsBySelf.map((f) => f.name), + ].join(' '); + expect(rangedAllFunctions).toContain('A'); + expect(rangedAllFunctions).toContain('B'); + + // After pushing a range, the samples should be different from baseline + expect(rangedSamples).not.toBe(baselineSamples); + }); + + it('popViewRange restores the previous view', async function () { + const { profile } = getProfileFromTextSamples(` + 0 
10 20 + A A A + B B B + C D E + `); + + const store = storeWithProfile(profile); + const state = store.getState(); + const rootRange = getProfileRootRange(state); + + const querier = new ProfileQuerier(store, rootRange); + + // Get baseline samples + const baselineSamples = await querier.threadSamples(); + + // Create timestamp names and push a range + const startName = querier._timestampManager.nameForTimestamp( + rootRange.start + 5 + ); + const endName = querier._timestampManager.nameForTimestamp( + rootRange.start + 15 + ); + await querier.pushViewRange(`${startName},${endName}`); + const rangedSamples = await querier.threadSamples(); + + // Samples should be different after push + expect(rangedSamples).not.toBe(baselineSamples); + + // Pop the range + const popResult = await querier.popViewRange(); + expect(popResult.message).toContain('Popped view range'); + + // Samples should be back to baseline (or at least different from ranged) + const afterPopSamples = await querier.threadSamples(); + expect(afterPopSamples).not.toBe(rangedSamples); + }); + + it('shows non-empty output after pushing a range with samples', async function () { + // Create a profile with many samples across a longer time range + const { profile } = getProfileFromTextSamples(` + 0 1 2 3 4 5 6 7 8 9 10 11 12 + A A A A A A A A A A A A A + B B B B B B B B B B B B B + C C C D D D E E E F F F G + `); + + const store = storeWithProfile(profile); + const state = store.getState(); + const rootRange = getProfileRootRange(state); + + const querier = new ProfileQuerier(store, rootRange); + + // Push a range that includes samples in the middle (5-8ms should include samples at 5, 6, 7, 8) + const startName = querier._timestampManager.nameForTimestamp( + rootRange.start + 5 + ); + const endName = querier._timestampManager.nameForTimestamp( + rootRange.start + 8 + ); + await querier.pushViewRange(`${startName},${endName}`); + + const rangedSamples = await querier.threadSamples(); + + // The output should NOT be empty - it should contain functions from the selected range + const rangedFunctions = [ + ...rangedSamples.topFunctionsByTotal.map((f) => f.name), + ...rangedSamples.topFunctionsBySelf.map((f) => f.name), + ].join(' '); + expect(rangedFunctions).toContain('A'); + expect(rangedFunctions).toContain('B'); + + // Should show D and/or E (which are in the range) + const hasD = rangedFunctions.includes('D'); + const hasE = rangedFunctions.includes('E'); + expect(hasD || hasE).toBe(true); + + // Should show actual function data, not empty sections + expect(rangedSamples.topFunctionsByTotal.length).toBeGreaterThan(0); + expect(rangedSamples.topFunctionsBySelf.length).toBeGreaterThan(0); + }); + + it('works correctly with absolute timestamps and non-zero profile start', async function () { + // Create a profile that starts at 1000ms (not zero) + const { profile } = getProfileFromTextSamples(` + 1000 1005 1010 1015 1020 + A A A A A + B B B B B + C D E F G + `); + + const store = storeWithProfile(profile); + const state = store.getState(); + const rootRange = getProfileRootRange(state); + + const querier = new ProfileQuerier(store, rootRange); + + // Push a range using absolute timestamps + // pushViewRange should convert these to relative timestamps for commitRange + const startName = querier._timestampManager.nameForTimestamp(1005); + const endName = querier._timestampManager.nameForTimestamp(1015); + await querier.pushViewRange(`${startName},${endName}`); + + const rangedSamples = await querier.threadSamples(); + + // Should 
contain functions from the selected range (1005-1015ms) + const rangedFunctions2 = [ + ...rangedSamples.topFunctionsByTotal.map((f) => f.name), + ...rangedSamples.topFunctionsBySelf.map((f) => f.name), + ].join(' '); + expect(rangedFunctions2).toContain('A'); + expect(rangedFunctions2).toContain('B'); + + // Should contain D and E which are in the middle of the range + const hasD = rangedFunctions2.includes('D'); + const hasE = rangedFunctions2.includes('E'); + expect(hasD || hasE).toBe(true); + }); + }); +}); diff --git a/src/test/unit/profile-query/time-range-parser.test.ts b/src/test/unit/profile-query/time-range-parser.test.ts new file mode 100644 index 0000000000..1091cc48af --- /dev/null +++ b/src/test/unit/profile-query/time-range-parser.test.ts @@ -0,0 +1,145 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { parseTimeValue } from '../../../profile-query/time-range-parser'; +import type { StartEndRange } from 'firefox-profiler/types'; + +describe('parseTimeValue', () => { + const rootRange: StartEndRange = { + start: 1000, + end: 11000, + }; + + describe('timestamp names', () => { + it('returns null for timestamp names', () => { + expect(parseTimeValue('ts-0', rootRange)).toBe(null); + expect(parseTimeValue('ts-6', rootRange)).toBe(null); + expect(parseTimeValue('ts-Z', rootRange)).toBe(null); + expect(parseTimeValue('ts<0', rootRange)).toBe(null); + expect(parseTimeValue('ts>1', rootRange)).toBe(null); + }); + }); + + describe('seconds (no suffix)', () => { + it('parses seconds as default format', () => { + expect(parseTimeValue('0', rootRange)).toBe(1000); + expect(parseTimeValue('1', rootRange)).toBe(2000); + expect(parseTimeValue('5', rootRange)).toBe(6000); + expect(parseTimeValue('10', rootRange)).toBe(11000); + }); + + it('parses decimal seconds', () => { + expect(parseTimeValue('0.5', rootRange)).toBe(1500); + expect(parseTimeValue('2.7', rootRange)).toBe(3700); + expect(parseTimeValue('3.14', rootRange)).toBe(4140); + }); + + it('handles leading zeros', () => { + expect(parseTimeValue('0.001', rootRange)).toBe(1001); + expect(parseTimeValue('00.5', rootRange)).toBe(1500); + }); + }); + + describe('seconds with suffix', () => { + it('parses seconds with "s" suffix', () => { + expect(parseTimeValue('0s', rootRange)).toBe(1000); + expect(parseTimeValue('1s', rootRange)).toBe(2000); + expect(parseTimeValue('5s', rootRange)).toBe(6000); + }); + + it('parses decimal seconds with "s" suffix', () => { + expect(parseTimeValue('0.5s', rootRange)).toBe(1500); + expect(parseTimeValue('2.7s', rootRange)).toBe(3700); + }); + }); + + describe('milliseconds', () => { + it('parses milliseconds', () => { + expect(parseTimeValue('0ms', rootRange)).toBe(1000); + expect(parseTimeValue('1000ms', rootRange)).toBe(2000); + expect(parseTimeValue('2700ms', rootRange)).toBe(3700); + expect(parseTimeValue('10000ms', rootRange)).toBe(11000); + }); + + it('parses decimal milliseconds', () => { + expect(parseTimeValue('500ms', rootRange)).toBe(1500); + expect(parseTimeValue('0.5ms', rootRange)).toBe(1000.5); + }); + }); + + describe('percentages', () => { + it('parses percentages of profile duration', () => { + // Profile duration is 10000ms (11000 - 1000) + expect(parseTimeValue('0%', rootRange)).toBe(1000); + expect(parseTimeValue('10%', rootRange)).toBe(2000); + expect(parseTimeValue('50%', rootRange)).toBe(6000); + 
expect(parseTimeValue('100%', rootRange)).toBe(11000); + }); + + it('parses decimal percentages', () => { + expect(parseTimeValue('5%', rootRange)).toBe(1500); + expect(parseTimeValue('25%', rootRange)).toBe(3500); + expect(parseTimeValue('17%', rootRange)).toBe(2700); + }); + + it('handles percentages over 100%', () => { + expect(parseTimeValue('150%', rootRange)).toBe(16000); + }); + }); + + describe('error handling', () => { + it('throws on invalid seconds', () => { + expect(() => parseTimeValue('abc', rootRange)).toThrow( + 'Invalid time value' + ); + expect(() => parseTimeValue('', rootRange)).toThrow('Invalid time value'); + }); + + it('throws on invalid milliseconds', () => { + expect(() => parseTimeValue('abcms', rootRange)).toThrow( + 'Invalid milliseconds' + ); + expect(() => parseTimeValue('ms', rootRange)).toThrow( + 'Invalid milliseconds' + ); + }); + + it('throws on invalid percentages', () => { + expect(() => parseTimeValue('abc%', rootRange)).toThrow( + 'Invalid percentage' + ); + expect(() => parseTimeValue('%', rootRange)).toThrow( + 'Invalid percentage' + ); + }); + + it('throws on invalid seconds with suffix', () => { + expect(() => parseTimeValue('abcs', rootRange)).toThrow( + 'Invalid seconds' + ); + expect(() => parseTimeValue('s', rootRange)).toThrow('Invalid seconds'); + }); + }); + + describe('edge cases', () => { + it('handles negative values', () => { + expect(parseTimeValue('-1', rootRange)).toBe(0); + expect(parseTimeValue('-1s', rootRange)).toBe(0); + expect(parseTimeValue('-1000ms', rootRange)).toBe(0); + }); + + it('handles very large values', () => { + // 1000000 seconds = 1000000000ms, plus rootRange.start (1000ms) + expect(parseTimeValue('1000000', rootRange)).toBe(1000001000); + expect(parseTimeValue('1000000s', rootRange)).toBe(1000001000); + }); + + it('handles zero', () => { + expect(parseTimeValue('0', rootRange)).toBe(1000); + expect(parseTimeValue('0s', rootRange)).toBe(1000); + expect(parseTimeValue('0ms', rootRange)).toBe(1000); + expect(parseTimeValue('0%', rootRange)).toBe(1000); + }); + }); +}); diff --git a/src/test/unit/profile-query/timestamps.test.ts b/src/test/unit/profile-query/timestamps.test.ts new file mode 100644 index 0000000000..35113ab0b9 --- /dev/null +++ b/src/test/unit/profile-query/timestamps.test.ts @@ -0,0 +1,133 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +import { TimestampManager } from 'firefox-profiler/profile-query/timestamps'; + +/** + * Unit tests for TimestampManager class. 
+ */ + +describe('TimestampManager', function () { + describe('in-range timestamps', function () { + it('assigns short hierarchical names', function () { + const m = new TimestampManager({ start: 1000, end: 2000 }); + expect(m.nameForTimestamp(1000)).toBe('ts-0'); + expect(m.nameForTimestamp(2000)).toBe('ts-Z'); + expect(m.nameForTimestamp(1500)).toBe('ts-K'); + expect(m.nameForTimestamp(1002)).toBe('ts-1'); + expect(m.nameForTimestamp(1000.1)).toBe('ts-04'); + expect(m.nameForTimestamp(1001)).toBe('ts-0K'); + expect(m.nameForTimestamp(1006)).toBe('ts-2'); + }); + }); + + describe('before-range timestamps', function () { + it('uses ts< prefix with exponential buckets', function () { + const m = new TimestampManager({ start: 1000, end: 2000 }); + // Range length = 1000 + // ts<0 covers [0, 1000] (1×length before start) + // ts<1 covers [-1000, 0] (2×length before start) + // ts<2 covers [-3000, -1000] (4×length before start) + + // Timestamps in bucket 0 + expect(m.nameForTimestamp(500)).toMatch(/^ts<0/); + expect(m.nameForTimestamp(999)).toMatch(/^ts<0/); + + // Timestamps in bucket 1 + expect(m.nameForTimestamp(-500)).toMatch(/^ts<1/); + expect(m.nameForTimestamp(-999)).toMatch(/^ts<1/); + + // Timestamps in bucket 2 + expect(m.nameForTimestamp(-1500)).toMatch(/^ts<2/); + expect(m.nameForTimestamp(-2999)).toMatch(/^ts<2/); + }); + + it('creates hierarchical names within buckets', function () { + const m = new TimestampManager({ start: 1000, end: 2000 }); + // Request two timestamps and verify they get valid bucket-0 names + const name1 = m.nameForTimestamp(500); + const name2 = m.nameForTimestamp(250); + + expect(name1).toMatch(/^ts<0[0-9a-zA-Z]+$/); + expect(name2).toMatch(/^ts<0[0-9a-zA-Z]+$/); + + // They should be different names + expect(name1).not.toBe(name2); + }); + }); + + describe('after-range timestamps', function () { + it('uses ts> prefix with exponential buckets', function () { + const m = new TimestampManager({ start: 1000, end: 2000 }); + // Range length = 1000 + // ts>0 covers [2000, 3000] (1×length after end) + // ts>1 covers [3000, 4000] (2×length after end) + // ts>2 covers [4000, 6000] (4×length after end) + + // Timestamps in bucket 0 + expect(m.nameForTimestamp(2500)).toMatch(/^ts>0/); + expect(m.nameForTimestamp(2999)).toMatch(/^ts>0/); + + // Timestamps in bucket 1 + expect(m.nameForTimestamp(3500)).toMatch(/^ts>1/); + expect(m.nameForTimestamp(3999)).toMatch(/^ts>1/); + + // Timestamps in bucket 2 + expect(m.nameForTimestamp(5000)).toMatch(/^ts>2/); + expect(m.nameForTimestamp(5999)).toMatch(/^ts>2/); + }); + + it('creates hierarchical names within buckets', function () { + const m = new TimestampManager({ start: 1000, end: 2000 }); + // Request two timestamps and verify they get valid bucket-0 names + const name1 = m.nameForTimestamp(2500); + const name2 = m.nameForTimestamp(2750); + + expect(name1).toMatch(/^ts>0[0-9a-zA-Z]+$/); + expect(name2).toMatch(/^ts>0[0-9a-zA-Z]+$/); + + // They should be different names + expect(name1).not.toBe(name2); + }); + }); + + describe('reverse lookup', function () { + it('returns timestamps for names that were previously minted', function () { + const m = new TimestampManager({ start: 1000, end: 2000 }); + + // Mint some names + const name1 = m.nameForTimestamp(1000); + const name2 = m.nameForTimestamp(1500); + const name3 = m.nameForTimestamp(500); + const name4 = m.nameForTimestamp(2500); + + // Reverse lookup should work + expect(m.timestampForName(name1)).toBe(1000); + expect(m.timestampForName(name2)).toBe(1500); + 
expect(m.timestampForName(name3)).toBe(500); + expect(m.timestampForName(name4)).toBe(2500); + }); + + it('returns null for unknown names', function () { + const m = new TimestampManager({ start: 1000, end: 2000 }); + expect(m.timestampForName('ts-X')).toBe(null); + expect(m.timestampForName('ts<0Y')).toBe(null); + expect(m.timestampForName('unknown')).toBe(null); + }); + + it('handles repeated requests for the same timestamp', function () { + const m = new TimestampManager({ start: 1000, end: 2000 }); + + // Request same timestamp twice + const name1 = m.nameForTimestamp(1500); + const name2 = m.nameForTimestamp(1500); + + // Should get the same name + expect(name1).toBe(name2); + + // Reverse lookup should work + expect(m.timestampForName(name1)).toBe(1500); + }); + }); +});
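For reference, the exponential bucketing that the before-range and after-range tests above encode can be reproduced with the following sketch. This is an illustration only, derived from the bucket ranges documented in the test comments; the helper name bucketIndexForOutOfRange and its signature are hypothetical and are not part of the real TimestampManager implementation.

// Hypothetical sketch of the bucket-index computation implied by the tests
// above. Bucket 0 covers up to one range-length outside the committed range;
// every later bucket doubles the covered distance: bucket k (k >= 1) covers
// distances in (2^(k-1), 2^k] range-lengths from the nearer range edge.
function bucketIndexForOutOfRange(
  timestamp: number,
  range: { start: number; end: number }
): number {
  const length = range.end - range.start;
  // Distance from the nearer edge of the range (before start or after end).
  const distance =
    timestamp < range.start ? range.start - timestamp : timestamp - range.end;
  return distance <= length ? 0 : Math.ceil(Math.log2(distance / length));
}

// Example, matching the expectations for a {start: 1000, end: 2000} range:
// bucketIndexForOutOfRange(500, range) === 0   -> names starting with 'ts<0'
// bucketIndexForOutOfRange(-1500, range) === 2 -> names starting with 'ts<2'
// bucketIndexForOutOfRange(3500, range) === 1  -> names starting with 'ts>1'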