Skip to content

Commit 6801983

Browse files
author
ChidcGithub
committed
Release v0.0.2 - Phase 6 complete: local models & observability
Features:
- Add LlamaCppProvider for llama.cpp GGUF model support
- Add QuantizationConfig for quantization settings
- Add observability module: logging, metrics, tracing
- Add OpenTelemetry exporter integration
- Fix thread safety in CognitiveContext (threading.local)
- Fix various bugs in runtime, scheduler, validation

Bug fixes:
- determinism.py: fix indentation in deterministic_call
- runtime.py: thread-safe context with stack structure
- scheduler.py: add _current_coro attribute to ScheduledTask
- validation.py: add max_results to FactCheckValidator
- cli.py: add --api-key parameter to repl command
- pyproject.toml: add aiohttp dependency

Tests: 241 passed, 2 skipped
1 parent 8037f5a commit 6801983

13 files changed

Lines changed: 2755 additions & 46 deletions

codegnipy/__init__.py

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -88,6 +88,8 @@
8888
AnthropicProvider,
8989
OllamaProvider,
9090
TransformersProvider,
91+
LlamaCppProvider,
92+
QuantizationConfig,
9193
ProviderFactory,
9294
create_provider
9395
)
@@ -104,6 +106,22 @@
104106
verify_claim,
105107
verify_claim_async
106108
)
109+
from .observability import (
110+
LogLevel,
111+
MetricType,
112+
SpanContext,
113+
Metric,
114+
CognitiveLogger,
115+
MetricsCollector,
116+
Tracer,
117+
OpenTelemetryExporter,
118+
ObservabilityManager,
119+
traced,
120+
logged,
121+
metered,
122+
get_default_manager,
123+
configure_observability,
124+
)
107125

108126
__all__ = [
109127
# Core
@@ -173,6 +191,8 @@
173191
"AnthropicProvider",
174192
"OllamaProvider",
175193
"TransformersProvider",
194+
"LlamaCppProvider",
195+
"QuantizationConfig",
176196
"ProviderFactory",
177197
"create_provider",
178198
# Validation
@@ -187,4 +207,19 @@
187207
"create_default_validator",
188208
"verify_claim",
189209
"verify_claim_async",
210+
# Observability
211+
"LogLevel",
212+
"MetricType",
213+
"SpanContext",
214+
"Metric",
215+
"CognitiveLogger",
216+
"MetricsCollector",
217+
"Tracer",
218+
"OpenTelemetryExporter",
219+
"ObservabilityManager",
220+
"traced",
221+
"logged",
222+
"metered",
223+
"get_default_manager",
224+
"configure_observability",
190225
]

codegnipy/cli.py

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,10 @@ def create_parser() -> argparse.ArgumentParser:
4343
default="gpt-4o-mini",
4444
help="使用的 LLM 模型"
4545
)
46+
repl_parser.add_argument(
47+
"--api-key", "-k",
48+
help="API 密钥(也可通过环境变量 OPENAI_API_KEY 设置)"
49+
)
4650

4751
# version 命令
4852
subparsers.add_parser("version", help="显示版本信息")
@@ -86,17 +90,17 @@ def run_file(filepath: str, model: str, api_key: Optional[str] = None):
8690
sys.exit(1)
8791

8892

89-
def start_repl(model: str):
93+
def start_repl(model: str, api_key: Optional[str] = None):
9094
"""启动交互式 REPL"""
9195
import code
92-
96+
9397
print("Codegnipy REPL")
9498
print(f"模型: {model}")
9599
print("输入 Python 代码,~\"prompt\" 语法将调用 LLM")
96100
print("输入 exit() 或 Ctrl+D 退出\n")
97-
101+
98102
# 创建上下文
99-
ctx = CognitiveContext(model=model)
103+
ctx = CognitiveContext(model=model, api_key=api_key)
100104
ctx.__enter__()
101105

102106
# 准备 REPL 环境
@@ -137,11 +141,11 @@ def main():
137141
"""主入口"""
138142
parser = create_parser()
139143
args = parser.parse_args()
140-
144+
141145
if args.command == "run":
142146
run_file(args.file, args.model, args.api_key)
143147
elif args.command == "repl":
144-
start_repl(args.model)
148+
start_repl(args.model, getattr(args, 'api_key', None))
145149
elif args.command == "version":
146150
print(f"Codegnipy v{codegnipy.__version__}")
147151
else:

codegnipy/determinism.py

Lines changed: 11 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -558,54 +558,34 @@ def set_external_validator(self, validator: "BaseValidator") -> None:
558558
# ============ 确定性认知调用 ============
559559

560560
def deterministic_call(
561-
562561
prompt: str,
563-
564562
constraint: TypeConstraint,
565-
566563
context: Optional["CognitiveContext"] = None,
567-
568564
*,
569-
570565
max_attempts: int = 3,
571-
572566
use_reflection: bool = False,
573-
574567
simulator: Optional[Simulator] = None
575-
576568
) -> ValidationResult:
577-
578569
"""
579-
580570
带类型约束的确定性认知调用
581571
582-
583-
584572
参数:
585-
586573
prompt: 提示文本
587-
588574
constraint: 类型约束
589-
590575
context: 认知上下文
591-
592576
max_attempts: 最大尝试次数
593-
594577
use_reflection: 是否使用反思
595-
596578
simulator: 模拟器(用于测试)
597579
598580
返回:
599-
600581
ValidationResult 对象
601-
602582
"""
603583
from .runtime import cognitive_call
604584
from .reflection import with_reflection
605-
585+
606586
# 构建带约束的提示
607587
constrained_prompt = f"{prompt}\n\n约束: {constraint.to_prompt()}\n\n请严格按照约束要求回答。"
608-
588+
609589
for attempt in range(max_attempts):
610590
# 获取响应
611591
if simulator and simulator.mode != SimulationMode.OFF:
@@ -616,16 +596,20 @@ def deterministic_call(
616596
response = result.corrected_response or result.original_response
617597
else:
618598
response = cognitive_call(constrained_prompt, context)
619-
599+
620600
# 验证响应
621601
validation = constraint.validate(response)
622-
602+
623603
if validation.status == ValidationStatus.VALID:
624604
return validation
625-
605+
626606
# 如果验证失败,添加反馈并重试
627607
if attempt < max_attempts - 1:
628608
error_feedback = "; ".join(validation.errors)
629-
constrained_prompt = f"{prompt}\n\n约束: {constraint.to_prompt()}\n\n上次的回答不符合要求,错误: {error_feedback}\n\n请修正后重新回答。"
630-
609+
constrained_prompt = (
610+
f"{prompt}\n\n约束: {constraint.to_prompt()}\n\n"
611+
f"上次的回答不符合要求,错误: {error_feedback}\n\n"
612+
f"请修正后重新回答。"
613+
)
614+
631615
return validation

0 commit comments

Comments (0)