Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 13 additions & 0 deletions examples/lang-chain/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
### LangChain / LangGraph examples (Python)

These examples show how to use Sentience as a **tool layer** inside LangChain and LangGraph.

Install:

```bash
pip install sentienceapi[langchain]
```

Examples:
- `langchain_tools_demo.py`: build a Sentience tool pack for LangChain
- `langgraph_self_correcting_graph.py`: observe → act → verify → branch (retry) template
41 changes: 41 additions & 0 deletions examples/lang-chain/langchain_tools_demo.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
"""
Example: Build Sentience LangChain tools (async-only).

Install:
pip install sentienceapi[langchain]

Run:
python examples/lang-chain/langchain_tools_demo.py

Notes:
- This example focuses on creating the tools. Hook them into your agent of choice.
"""

from __future__ import annotations

import asyncio

from sentience import AsyncSentienceBrowser
from sentience.integrations.langchain import (
SentienceLangChainContext,
build_sentience_langchain_tools,
)


async def main() -> None:
browser = AsyncSentienceBrowser(headless=False)
await browser.start()
await browser.goto("https://example.com")

ctx = SentienceLangChainContext(browser=browser)
tools = build_sentience_langchain_tools(ctx)

print("Registered tools:")
for t in tools:
print(f"- {t.name}")

await browser.close()


if __name__ == "__main__":
asyncio.run(main())
80 changes: 80 additions & 0 deletions examples/lang-chain/langgraph_self_correcting_graph.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
"""
LangGraph reference example: Sentience observe → act → verify → branch (self-correcting).

Install:
pip install sentienceapi[langchain]

Run:
python examples/lang-chain/langgraph_self_correcting_graph.py
"""

from __future__ import annotations

import asyncio
from dataclasses import dataclass

from sentience import AsyncSentienceBrowser
from sentience.integrations.langchain import SentienceLangChainContext, SentienceLangChainCore


@dataclass
class State:
    """Mutable state threaded through the observe → act → verify graph."""

    url: str | None = None  # last observed page URL (set by observe)
    last_action: str | None = None  # most recent action taken: "navigate" or "noop"
    attempts: int = 0  # number of act() executions; bounds the retry loop
    done: bool = False  # True once URL verification passes


async def main() -> None:
    """Build and run the self-correcting observe → act → verify graph."""
    # Lazy import so this module can be imported without langgraph installed.
    from langgraph.graph import END, StateGraph

    browser = AsyncSentienceBrowser(headless=False)
    await browser.start()
    try:
        core = SentienceLangChainCore(SentienceLangChainContext(browser=browser))

        async def observe(state: State) -> State:
            # Capture the current page URL into graph state.
            s = await core.snapshot_state()
            state.url = s.url
            return state

        async def act(state: State) -> State:
            # Replace with an LLM decision node. For demo we just navigate once.
            if state.attempts == 0:
                await core.navigate("https://example.com")
                state.last_action = "navigate"
            else:
                state.last_action = "noop"
            state.attempts += 1
            return state

        async def verify(state: State) -> State:
            # Guard condition: the current URL must match example.com.
            out = await core.verify_url_matches(r"example\.com")
            state.done = bool(out.passed)
            return state

        def branch(state: State) -> str:
            # Stop when verified or after 3 attempts; otherwise loop back to observe.
            if state.done or state.attempts >= 3:
                return "done"
            return "retry"

        g = StateGraph(State)
        g.add_node("observe", observe)
        g.add_node("act", act)
        g.add_node("verify", verify)
        g.set_entry_point("observe")
        g.add_edge("observe", "act")
        g.add_edge("act", "verify")
        g.add_conditional_edges("verify", branch, {"retry": "observe", "done": END})
        app = g.compile()

        final = await app.ainvoke(State())
        print(final)
    finally:
        # Release the browser even if graph construction or execution raises.
        await browser.close()


if __name__ == "__main__":
    asyncio.run(main())
88 changes: 88 additions & 0 deletions examples/langgraph/sentience_self_correcting_graph.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
"""
LangGraph reference example: Sentience observe → act → verify → branch (self-correcting).

Install:
pip install sentienceapi[langchain]

Run:
python examples/langgraph/sentience_self_correcting_graph.py

Notes:
- This is a template demonstrating control flow; you can replace the "decide" node
with an LLM step (LangChain) that chooses actions based on snapshot_state/read_page.
"""

from __future__ import annotations

import asyncio
from dataclasses import dataclass
from typing import Optional

from sentience import AsyncSentienceBrowser
from sentience.integrations.langchain import SentienceLangChainContext, SentienceLangChainCore


@dataclass
class State:
url: str | None = None
last_action: str | None = None
attempts: int = 0
done: bool = False


async def main() -> None:
# Lazy import so the file can exist without langgraph installed
from langgraph.graph import END, StateGraph

browser = AsyncSentienceBrowser(headless=False)
await browser.start()

core = SentienceLangChainCore(SentienceLangChainContext(browser=browser))

async def observe(state: State) -> State:
s = await core.snapshot_state()
state.url = s.url
return state

async def act(state: State) -> State:
# Replace this with an LLM-driven decision. For demo purposes, we just navigate once.
if state.attempts == 0:
await core.navigate("https://example.com")
state.last_action = "navigate"
else:
state.last_action = "noop"
state.attempts += 1
return state

async def verify(state: State) -> State:
# Guard condition: URL should contain example.com
out = await core.verify_url_matches(r"example\.com")
state.done = bool(out.passed)
return state

def should_continue(state: State) -> str:
# Self-correcting loop: retry observe→act→verify up to 3 attempts
if state.done:
return "done"
if state.attempts >= 3:
return "done"
return "retry"

g = StateGraph(State)
g.add_node("observe", observe)
g.add_node("act", act)
g.add_node("verify", verify)
g.set_entry_point("observe")
g.add_edge("observe", "act")
g.add_edge("act", "verify")
g.add_conditional_edges("verify", should_continue, {"retry": "observe", "done": END})
app = g.compile()

final = await app.ainvoke(State())
print(final)

await browser.close()


if __name__ == "__main__":
asyncio.run(main())
86 changes: 63 additions & 23 deletions sentience/cloud_tracing.py
Original file line number Diff line number Diff line change
Expand Up @@ -148,40 +148,80 @@ def close(

self._closed = True

# Flush and sync file to disk before closing to ensure all data is written
# This is critical on CI systems where file system operations may be slower
self._trace_file.flush()
if not blocking:
# Fire-and-forget background finalize+upload.
#
# IMPORTANT: for truly non-blocking close, we avoid synchronous work here
# (flush/fsync/index generation). That work happens in the background thread.
thread = threading.Thread(
target=self._close_and_upload_background,
args=(on_progress,),
daemon=True,
)
thread.start()
return # Return immediately

# Blocking mode: finalize trace file and upload now.
if not self._finalize_trace_file_for_upload():
return
self._do_upload(on_progress)

def _finalize_trace_file_for_upload(self) -> bool:
    """
    Finalize the local trace file so it is ready for upload.

    Flushes and fsyncs buffered data, closes the file handle, checks that
    the trace actually has content, and generates the index.

    Returns:
        True if there is data to upload, False if the trace is empty/missing.
    """
    # Flush and sync file to disk before closing to ensure all data is written.
    # This can be slow on CI file systems; in non-blocking close we do this in background.
    try:
        self._trace_file.flush()
    except Exception:
        pass
    try:
        # Force OS to write buffered data to disk
        os.fsync(self._trace_file.fileno())
    except (OSError, AttributeError):
        # Some file handles don't support fsync; flush is usually sufficient.
        pass
    try:
        self._trace_file.close()
    except Exception:
        pass

    # Ensure file exists and has content before proceeding.
    try:
        if not self._path.exists() or self._path.stat().st_size == 0:
            if self.logger:
                self.logger.warning("No trace events to upload (file is empty or missing)")
            return False
    except Exception:
        # If we can't stat, don't attempt upload
        return False

    # Generate index after closing file
    self._generate_index()
    return True

if not blocking:
# Fire-and-forget background upload
thread = threading.Thread(
target=self._do_upload,
args=(on_progress,),
daemon=True,
)
thread.start()
return # Return immediately
def _close_and_upload_background(
    self, on_progress: Callable[[int, int], None] | None = None
) -> None:
    """
    Background worker for non-blocking close.

    Performs file finalization + index generation + upload. Any failure is
    non-fatal: the trace file is preserved locally and the error is reported.
    """
    try:
        if not self._finalize_trace_file_for_upload():
            # Empty/missing trace: nothing to upload.
            return
        self._do_upload(on_progress)
    except Exception as e:
        # Non-fatal: preserve trace locally
        self._upload_successful = False
        print(f"❌ [Sentience] Error uploading trace (background): {e}")
        print(f"   Local trace preserved at: {self._path}")
        if self.logger:
            self.logger.error(f"Error uploading trace (background): {e}")

def _do_upload(self, on_progress: Callable[[int, int], None] | None = None) -> None:
"""
Expand Down
12 changes: 12 additions & 0 deletions sentience/integrations/langchain/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
"""
LangChain / LangGraph integration helpers (optional).

This package is designed so the base SDK can be imported without LangChain installed.
All LangChain imports are done lazily inside tool-builder functions.
"""

from .context import SentienceLangChainContext
from .core import SentienceLangChainCore
from .tools import build_sentience_langchain_tools

__all__ = ["SentienceLangChainContext", "SentienceLangChainCore", "build_sentience_langchain_tools"]
18 changes: 18 additions & 0 deletions sentience/integrations/langchain/context.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
from __future__ import annotations

from dataclasses import dataclass

from sentience.browser import AsyncSentienceBrowser
from sentience.tracing import Tracer


@dataclass
class SentienceLangChainContext:
    """
    Context for LangChain/LangGraph integrations.

    We keep this small and explicit; it mirrors the PydanticAI deps object.
    """

    browser: AsyncSentienceBrowser  # active async browser session the tools operate on
    tracer: Tracer | None = None  # optional tracer — presumably for recording tool activity; confirm in tools.py
Loading