
Commit c68cda6

Cleanup
1 parent 4996eea commit c68cda6

File tree

5 files changed (+105 / -110 lines)


apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts

Lines changed: 4 additions & 61 deletions
@@ -1482,31 +1482,9 @@ export function useWorkflowExecution() {
           const candidates = resolveStartCandidates(mergedStates, { execution: 'manual' })
           const candidate = candidates.find((c) => c.blockId === blockId)

-          logger.info('Run-from-block trigger analysis', {
-            blockId,
-            blockType: workflowBlocks[blockId]?.type,
-            blockTriggerMode: workflowBlocks[blockId]?.triggerMode,
-            candidateFound: !!candidate,
-            candidatePath: candidate?.path,
-            allCandidates: candidates.map((c) => ({
-              blockId: c.blockId,
-              type: c.block.type,
-              path: c.path,
-            })),
-          })
-
           if (candidate) {
-            const needsMockPayload = triggerNeedsMockPayload(candidate)
-            logger.info('Trigger mock payload check', {
-              needsMockPayload,
-              path: candidate.path,
-              isExternalTrigger: candidate.path === StartBlockPath.EXTERNAL_TRIGGER,
-              blockType: candidate.block.type,
-            })
-
-            if (needsMockPayload) {
+            if (triggerNeedsMockPayload(candidate)) {
               workflowInput = extractTriggerMockPayload(candidate)
-              logger.info('Extracted mock payload for trigger block', { blockId, workflowInput })
             } else if (
               candidate.path === StartBlockPath.SPLIT_API ||
               candidate.path === StartBlockPath.SPLIT_INPUT ||
@@ -1522,46 +1500,27 @@ export function useWorkflowExecution() {
               })
               if (Object.keys(testInput).length > 0) {
                 workflowInput = testInput
-                logger.info('Extracted test input for trigger block', { blockId, workflowInput })
               }
             }
           }
         } else {
-          // Fallback for trigger blocks not found in candidates
-          // This can happen when the block is a trigger by position (no incoming edges)
-          // but wasn't classified as a start candidate (e.g., triggerMode not set)
+          // Fallback: block is trigger by position but not classified as start candidate
           const block = mergedStates[blockId]
           if (block) {
             const blockConfig = getBlock(block.type)
             const hasTriggers = blockConfig?.triggers?.available?.length

             if (hasTriggers || block.triggerMode) {
-              // Block has trigger capability - extract mock payload
-              const syntheticCandidate = {
+              workflowInput = extractTriggerMockPayload({
                 blockId,
                 block,
                 path: StartBlockPath.EXTERNAL_TRIGGER,
-              }
-              workflowInput = extractTriggerMockPayload(syntheticCandidate)
-              logger.info('Extracted mock payload for trigger block (fallback)', {
-                blockId,
-                blockType: block.type,
-                hasTriggers,
-                triggerMode: block.triggerMode,
-                workflowInput,
               })
             }
           }
         }
       }

-      logger.info('Starting run-from-block execution', {
-        workflowId,
-        startBlockId: blockId,
-        isTriggerBlock,
-        hasInput: !!workflowInput,
-      })
-
       setIsExecuting(true)
       const executionId = uuidv4()
       const accumulatedBlockLogs: BlockLog[] = []
@@ -1576,10 +1535,6 @@ export function useWorkflowExecution() {
         sourceSnapshot: effectiveSnapshot,
         input: workflowInput,
         callbacks: {
-          onExecutionStarted: (data) => {
-            logger.info('Run-from-block execution started:', data)
-          },
-
          onBlockStarted: (data) => {
            activeBlocksSet.add(data.blockId)
            setActiveBlocks(new Set(activeBlocksSet))
@@ -1702,25 +1657,17 @@ export function useWorkflowExecution() {
                activeExecutionPath: Array.from(mergedExecutedBlocks),
              }
              setLastExecutionSnapshot(workflowId, updatedSnapshot)
-              logger.info('Updated execution snapshot after run-from-block', {
-                workflowId,
-                newBlocksExecuted: executedBlockIds.size,
-              })
            }
          },

          onExecutionError: (data) => {
-            logger.error('Run-from-block execution error:', data.error)
-
-            // If block not found, the snapshot is stale - clear it
            if (data.error?.includes('Block not found in workflow')) {
              clearLastExecutionSnapshot(workflowId)
              addNotification({
                level: 'info',
                message: 'Workflow was modified. Run the workflow again to refresh.',
                workflowId,
              })
-              logger.info('Cleared stale execution snapshot', { workflowId })
            } else {
              addNotification({
                level: 'error',
@@ -1729,15 +1676,11 @@ export function useWorkflowExecution() {
              })
            }
          },
-
-          onExecutionCancelled: () => {
-            logger.info('Run-from-block execution cancelled')
-          },
        },
      })
    } catch (error) {
      if ((error as Error).name !== 'AbortError') {
-        logger.error('Run-from-block execution failed:', error)
+        logger.error('Run-from-block failed:', error)
      }
    } finally {
      setIsExecuting(false)

apps/sim/executor/execution/executor.ts

Lines changed: 0 additions & 12 deletions
@@ -145,9 +145,6 @@ export class DAGExecutor {
       effectiveStartBlockId,
       dirtySetSize: dirtySet.size,
       upstreamSetSize: upstreamSet.size,
-      filteredBlockStatesCount: Object.keys(filteredBlockStates).length,
-      totalBlocks: dag.nodes.size,
-      dirtyBlocks: Array.from(dirtySet),
     })

     // Remove incoming edges from non-dirty sources so convergent blocks don't wait for cached upstream
@@ -164,10 +161,6 @@

       for (const sourceId of nonDirtyIncoming) {
         node.incomingEdges.delete(sourceId)
-        logger.debug('Removed non-dirty incoming edge for run-from-block', {
-          nodeId,
-          sourceId,
-        })
       }
     }

@@ -327,11 +320,6 @@

       if (isRegularBlock) {
         this.initializeStarterBlock(context, state, startBlockId)
-        logger.info('Run-from-block mode: initialized start block', { startBlockId })
-      } else {
-        logger.info('Run-from-block mode: skipping starter block init for container/sentinel', {
-          startBlockId,
-        })
       }
     } else {
       this.initializeStarterBlock(context, state, triggerBlockId)

apps/sim/executor/utils/run-from-block.test.ts

Lines changed: 100 additions & 1 deletion
@@ -1,9 +1,16 @@
 import { describe, expect, it } from 'vitest'
 import type { DAG, DAGNode } from '@/executor/dag/builder'
 import type { DAGEdge, NodeMetadata } from '@/executor/dag/types'
-import { computeDirtySet, validateRunFromBlock } from '@/executor/utils/run-from-block'
+import { computeExecutionSets, validateRunFromBlock } from '@/executor/utils/run-from-block'
 import type { SerializedLoop, SerializedParallel } from '@/serializer/types'

+/**
+ * Helper to extract dirty set from computeExecutionSets
+ */
+function computeDirtySet(dag: DAG, startBlockId: string): Set<string> {
+  return computeExecutionSets(dag, startBlockId).dirtySet
+}
+
 /**
  * Helper to create a DAG node for testing
  */
@@ -491,3 +498,95 @@ describe('computeDirtySet with containers', () => {
     expect(dirtySet.has('A')).toBe(false)
   })
 })
+
+describe('computeExecutionSets upstream set', () => {
+  it('includes all upstream blocks in linear workflow', () => {
+    // A → B → C → D
+    const dag = createDAG([
+      createNode('A', [{ target: 'B' }]),
+      createNode('B', [{ target: 'C' }]),
+      createNode('C', [{ target: 'D' }]),
+      createNode('D'),
+    ])
+
+    const { upstreamSet } = computeExecutionSets(dag, 'C')
+
+    expect(upstreamSet.has('A')).toBe(true)
+    expect(upstreamSet.has('B')).toBe(true)
+    expect(upstreamSet.has('C')).toBe(false) // start block not in upstream
+    expect(upstreamSet.has('D')).toBe(false) // downstream
+  })
+
+  it('includes all branches in convergent upstream', () => {
+    // A → C
+    // B → C → D
+    const dag = createDAG([
+      createNode('A', [{ target: 'C' }]),
+      createNode('B', [{ target: 'C' }]),
+      createNode('C', [{ target: 'D' }]),
+      createNode('D'),
+    ])
+
+    const { upstreamSet } = computeExecutionSets(dag, 'C')
+
+    expect(upstreamSet.has('A')).toBe(true)
+    expect(upstreamSet.has('B')).toBe(true)
+    expect(upstreamSet.has('C')).toBe(false)
+    expect(upstreamSet.has('D')).toBe(false)
+  })
+
+  it('excludes parallel branches not in upstream path', () => {
+    // A → B → D
+    // A → C → D
+    // Running from B: upstream is A only, not C
+    const dag = createDAG([
+      createNode('A', [{ target: 'B' }, { target: 'C' }]),
+      createNode('B', [{ target: 'D' }]),
+      createNode('C', [{ target: 'D' }]),
+      createNode('D'),
+    ])
+
+    const { upstreamSet, dirtySet } = computeExecutionSets(dag, 'B')
+
+    // Upstream should only contain A
+    expect(upstreamSet.has('A')).toBe(true)
+    expect(upstreamSet.has('C')).toBe(false) // parallel branch, not upstream of B
+    // Dirty should contain B and D
+    expect(dirtySet.has('B')).toBe(true)
+    expect(dirtySet.has('D')).toBe(true)
+    expect(dirtySet.has('C')).toBe(false)
+  })
+
+  it('handles diamond pattern upstream correctly', () => {
+    //     B
+    //   ↗   ↘
+    // A       D → E
+    //   ↘   ↗
+    //     C
+    // Running from D: upstream should be A, B, C
+    const dag = createDAG([
+      createNode('A', [{ target: 'B' }, { target: 'C' }]),
+      createNode('B', [{ target: 'D' }]),
+      createNode('C', [{ target: 'D' }]),
+      createNode('D', [{ target: 'E' }]),
+      createNode('E'),
+    ])
+
+    const { upstreamSet, dirtySet } = computeExecutionSets(dag, 'D')
+
+    expect(upstreamSet.has('A')).toBe(true)
+    expect(upstreamSet.has('B')).toBe(true)
+    expect(upstreamSet.has('C')).toBe(true)
+    expect(upstreamSet.has('D')).toBe(false)
+    expect(dirtySet.has('D')).toBe(true)
+    expect(dirtySet.has('E')).toBe(true)
+  })
+
+  it('returns empty upstream set for root block', () => {
+    const dag = createDAG([createNode('A', [{ target: 'B' }]), createNode('B')])
+
+    const { upstreamSet } = computeExecutionSets(dag, 'A')
+
+    expect(upstreamSet.size).toBe(0)
+  })
+})

apps/sim/executor/utils/run-from-block.ts

Lines changed: 0 additions & 24 deletions
@@ -1,9 +1,6 @@
-import { createLogger } from '@sim/logger'
 import { LOOP, PARALLEL } from '@/executor/constants'
 import type { DAG } from '@/executor/dag/builder'

-const logger = createLogger('run-from-block')
-
 /**
  * Builds the sentinel-start node ID for a loop.
  */
@@ -112,30 +109,9 @@ export function computeExecutionSets(dag: DAG, startBlockId: string): ExecutionS
     }
   }

-  logger.debug('Computed execution sets', {
-    startBlockId,
-    traversalStartId,
-    dirtySetSize: dirty.size,
-    upstreamSetSize: upstream.size,
-  })
-
   return { dirtySet: dirty, upstreamSet: upstream }
 }

-/**
- * @deprecated Use computeExecutionSets instead for combined computation
- */
-export function computeDirtySet(dag: DAG, startBlockId: string): Set<string> {
-  return computeExecutionSets(dag, startBlockId).dirtySet
-}
-
-/**
- * @deprecated Use computeExecutionSets instead for combined computation
- */
-export function computeUpstreamSet(dag: DAG, blockId: string): Set<string> {
-  return computeExecutionSets(dag, blockId).upstreamSet
-}
-
 /**
  * Validates that a block can be used as a run-from-block starting point.
  *

apps/sim/lib/workflows/executor/execution-core.ts

Lines changed: 1 addition & 12 deletions
@@ -27,10 +27,7 @@ import type {
 } from '@/executor/execution/types'
 import type { ExecutionResult, NormalizedBlockOutput } from '@/executor/types'
 import { hasExecutionResult } from '@/executor/utils/errors'
-import {
-  buildParallelSentinelEndId,
-  buildSentinelEndId,
-} from '@/executor/utils/subflow-utils'
+import { buildParallelSentinelEndId, buildSentinelEndId } from '@/executor/utils/subflow-utils'
 import { Serializer } from '@/serializer'

 const logger = createLogger('ExecutionCore')
@@ -264,16 +261,8 @@
   if (stopAfterBlockId) {
     if (serializedWorkflow.loops?.[stopAfterBlockId]) {
       resolvedStopAfterBlockId = buildSentinelEndId(stopAfterBlockId)
-      logger.info(`[${requestId}] Resolved loop container to sentinel-end`, {
-        original: stopAfterBlockId,
-        resolved: resolvedStopAfterBlockId,
-      })
     } else if (serializedWorkflow.parallels?.[stopAfterBlockId]) {
       resolvedStopAfterBlockId = buildParallelSentinelEndId(stopAfterBlockId)
-      logger.info(`[${requestId}] Resolved parallel container to sentinel-end`, {
-        original: stopAfterBlockId,
-        resolved: resolvedStopAfterBlockId,
-      })
     }
   }