7 changes: 6 additions & 1 deletion openevolve/controller.py
@@ -263,13 +263,18 @@ async def run(
         # Get artifacts for the parent program if available
         parent_artifacts = self.database.get_artifacts(parent.id)

+        # Get actual top programs for prompt context (separate from inspirations)
+        # This ensures the LLM sees only high-performing programs as examples
+        actual_top_programs = self.database.get_top_programs(5)
+
         # Build prompt
         prompt = self.prompt_sampler.build_prompt(
             current_program=parent.code,
             parent_program=parent.code,  # We don't have the parent's code, use the same
             program_metrics=parent.metrics,
             previous_programs=[p.to_dict() for p in self.database.get_top_programs(3)],
-            top_programs=[p.to_dict() for p in inspirations],
+            top_programs=[p.to_dict() for p in actual_top_programs],  # Use actual top programs
+            inspirations=[p.to_dict() for p in inspirations],  # Pass inspirations separately
             language=self.language,
             evolution_round=i,
             diff_based_evolution=self.config.diff_based_evolution,
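The behavioral change above is that `top_programs` and `inspirations` are now distinct inputs: the exemplars shown to the LLM come from `database.get_top_programs(5)`, while the sampled inspirations are passed through separately. Below is a minimal sketch of the new calling contract; the `Program` dataclass and `StubDatabase` are hypothetical stand-ins, not part of the diff.

```python
from dataclasses import dataclass, field
from typing import Any, Dict, List


@dataclass
class Program:
    id: str
    code: str
    metrics: Dict[str, float] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        return {"id": self.id, "code": self.code, "metrics": self.metrics}


class StubDatabase:
    """Hypothetical stand-in for openevolve's program database."""

    def __init__(self, programs: List[Program]):
        self.programs = programs

    def get_top_programs(self, n: int) -> List[Program]:
        # Best-first by average metric value, as a top-programs query would return
        return sorted(
            self.programs,
            key=lambda p: sum(p.metrics.values()) / max(len(p.metrics), 1),
            reverse=True,
        )[:n]


db = StubDatabase(
    [
        Program("a", "def f(): ...", {"score": 0.95}),
        Program("b", "def g(): ...", {"score": 0.40}),
        Program("c", "def h(): ...", {"score": 0.75}),
    ]
)
inspirations = [db.programs[1]]  # e.g., a diverse, low-scoring sample

# Old behavior: inspirations were passed AS top_programs.
# New behavior: the two lists stay separate, mirroring the diff above.
kwargs = {
    "top_programs": [p.to_dict() for p in db.get_top_programs(5)],
    "inspirations": [p.to_dict() for p in inspirations],
}
assert kwargs["top_programs"][0]["id"] == "a"  # genuinely high-performing exemplar
```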
146 changes: 144 additions & 2 deletions openevolve/prompt/sampler.py
@@ -51,6 +51,7 @@ def build_prompt(
         program_metrics: Dict[str, float] = {},
         previous_programs: List[Dict[str, Any]] = [],
         top_programs: List[Dict[str, Any]] = [],
+        inspirations: List[Dict[str, Any]] = [],  # Add inspirations parameter
         language: str = "python",
         evolution_round: int = 0,
         diff_based_evolution: bool = True,
@@ -66,7 +67,8 @@
             parent_program: Parent program from which current was derived
             program_metrics: Dictionary of metric names to values
             previous_programs: List of previous program attempts
-            top_programs: List of top-performing programs
+            top_programs: List of top-performing programs (best by fitness)
+            inspirations: List of inspiration programs (diverse/creative examples)
             language: Programming language
             evolution_round: Current evolution round
             diff_based_evolution: Whether to use diff-based evolution (True) or full rewrites (False)
@@ -110,7 +112,7 @@

         # Format evolution history
         evolution_history = self._format_evolution_history(
-            previous_programs, top_programs, language
+            previous_programs, top_programs, inspirations, language
         )

         # Format artifacts section if enabled and available
@@ -227,6 +229,7 @@ def _format_evolution_history(
         self,
         previous_programs: List[Dict[str, Any]],
         top_programs: List[Dict[str, Any]],
+        inspirations: List[Dict[str, Any]],
         language: str,
     ) -> str:
         """Format the evolution history for the prompt"""
@@ -391,11 +394,150 @@ def _format_evolution_history(
         # Combine top and diverse programs
         combined_programs_str = top_programs_str + diverse_programs_str

+        # Format inspirations section
+        inspirations_section_str = self._format_inspirations_section(inspirations, language)
+
         # Combine into full history
         return history_template.format(
             previous_attempts=previous_attempts_str.strip(),
             top_programs=combined_programs_str.strip(),
+            inspirations_section=inspirations_section_str,
         )

+    def _format_inspirations_section(
+        self, inspirations: List[Dict[str, Any]], language: str
+    ) -> str:
+        """
+        Format the inspirations section for the prompt
+
+        Args:
+            inspirations: List of inspiration programs
+            language: Programming language
+
+        Returns:
+            Formatted inspirations section string
+        """
+        if not inspirations:
+            return ""
+
+        # Get templates
+        inspirations_section_template = self.template_manager.get_template("inspirations_section")
+        inspiration_program_template = self.template_manager.get_template("inspiration_program")
+
+        inspiration_programs_str = ""
+
+        for i, program in enumerate(inspirations):
+            # Extract a snippet (first 8 lines) for display
+            program_code = program.get("code", "")
+            program_snippet = "\n".join(program_code.split("\n")[:8])
+            if len(program_code.split("\n")) > 8:
+                program_snippet += "\n# ... (truncated for brevity)"
+
+            # Calculate a composite score using safe numeric average
+            score = safe_numeric_average(program.get("metrics", {}))
+
+            # Determine program type based on metadata and score
+            program_type = self._determine_program_type(program)
+
+            # Extract unique features (emphasizing diversity rather than just performance)
+            unique_features = self._extract_unique_features(program)
+
+            inspiration_programs_str += (
+                inspiration_program_template.format(
+                    program_number=i + 1,
+                    score=f"{score:.4f}",
+                    program_type=program_type,
+                    language=language,
+                    program_snippet=program_snippet,
+                    unique_features=unique_features,
+                )
+                + "\n\n"
+            )
+
+        return inspirations_section_template.format(
+            inspiration_programs=inspiration_programs_str.strip()
+        )
+
+    def _determine_program_type(self, program: Dict[str, Any]) -> str:
+        """
+        Determine the type/category of an inspiration program
+
+        Args:
+            program: Program dictionary
+
+        Returns:
+            String describing the program type
+        """
+        metadata = program.get("metadata", {})
+        score = safe_numeric_average(program.get("metrics", {}))
+
+        # Check metadata for explicit type markers
+        if metadata.get("diverse", False):
+            return "Diverse"
+        if metadata.get("migrant", False):
+            return "Migrant"
+        if metadata.get("random", False):
+            return "Random"
+
+        # Classify based on score ranges
+        if score >= 0.8:
+            return "High-Performer"
+        elif score >= 0.6:
+            return "Alternative"
+        elif score >= 0.4:
+            return "Experimental"
+        else:
+            return "Exploratory"
+
+    def _extract_unique_features(self, program: Dict[str, Any]) -> str:
+        """
+        Extract unique features of an inspiration program
+
+        Args:
+            program: Program dictionary
+
+        Returns:
+            String describing unique aspects of the program
+        """
+        features = []
+
+        # Extract from metadata if available
+        metadata = program.get("metadata", {})
+        if "changes" in metadata:
+            changes = metadata["changes"]
+            if isinstance(changes, str) and len(changes) < 100:
+                features.append(f"Modification: {changes}")
+
+        # Analyze metrics for standout characteristics
+        metrics = program.get("metrics", {})
+        for metric_name, value in metrics.items():
+            if isinstance(value, (int, float)):
+                if value >= 0.9:
+                    features.append(f"Excellent {metric_name} ({value:.3f})")
+                elif value <= 0.3:
+                    features.append(f"Alternative {metric_name} approach")
+
+        # Code-based features (simple heuristics)
+        code = program.get("code", "")
+        if code:
+            code_lower = code.lower()
+            if "class" in code_lower and "def __init__" in code_lower:
+                features.append("Object-oriented approach")
+            if "numpy" in code_lower or "np." in code_lower:
+                features.append("NumPy-based implementation")
+            if "for" in code_lower and "while" in code_lower:
+                features.append("Mixed iteration strategies")
+            if len(code.split("\n")) < 10:
+                features.append("Concise implementation")
+            elif len(code.split("\n")) > 50:
+                features.append("Comprehensive implementation")
+
+        # Default if no specific features found
+        if not features:
+            program_type = self._determine_program_type(program)
+            features.append(f"{program_type} approach to the problem")
+
+        return ", ".join(features[:3])  # Limit to top 3 features
+
     def _apply_template_variations(self, template: str) -> str:
         """Apply stochastic variations to the template"""
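As a quick sanity check on the score bands above, here is a standalone sketch that mirrors `_determine_program_type` and exercises it on sample program dicts. The `safe_numeric_average` stub is an assumption inferred from its usage in the diff (an average over numeric metric values); the sample inputs are made up.

```python
from typing import Any, Dict


def safe_numeric_average(metrics: Dict[str, Any]) -> float:
    # Assumed behavior: mean of the numeric metric values, 0.0 if none
    values = [v for v in metrics.values() if isinstance(v, (int, float))]
    return sum(values) / len(values) if values else 0.0


def determine_program_type(program: Dict[str, Any]) -> str:
    """Mirror of the diff's classification: metadata markers win, then score bands."""
    metadata = program.get("metadata", {})
    score = safe_numeric_average(program.get("metrics", {}))

    # Explicit metadata markers take precedence over score
    for marker, label in (("diverse", "Diverse"), ("migrant", "Migrant"), ("random", "Random")):
        if metadata.get(marker, False):
            return label

    if score >= 0.8:
        return "High-Performer"
    elif score >= 0.6:
        return "Alternative"
    elif score >= 0.4:
        return "Experimental"
    return "Exploratory"


print(determine_program_type({"metadata": {"migrant": True}, "metrics": {"acc": 0.99}}))  # Migrant
print(determine_program_type({"metrics": {"acc": 0.85, "speed": 0.9}}))  # High-Performer
print(determine_program_type({"metrics": {"acc": 0.1}}))  # Exploratory
```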
20 changes: 20 additions & 0 deletions openevolve/prompt/templates.py
@@ -96,6 +96,8 @@
 ## Top Performing Programs

 {top_programs}
+
+{inspirations_section}
 """

 # Template for formatting a previous attempt
@@ -113,6 +115,22 @@
 Key features: {key_features}
 """

+# Template for formatting inspirations section
+INSPIRATIONS_SECTION_TEMPLATE = """## Inspiration Programs
+
+These programs represent diverse approaches and creative solutions that may inspire new ideas:
+
+{inspiration_programs}
+"""
+
+# Template for formatting an individual inspiration program
+INSPIRATION_PROGRAM_TEMPLATE = """### Inspiration {program_number} (Score: {score}, Type: {program_type})
+```{language}
+{program_snippet}
+```
+Unique approach: {unique_features}
+"""
+
 # Template for evaluating a program via an LLM
 EVALUATION_TEMPLATE = """Evaluate the following code on a scale of 0.0 to 1.0 for the following metrics:
 1. Readability: How easy is the code to read and understand?
@@ -144,6 +162,8 @@
     "evolution_history": EVOLUTION_HISTORY_TEMPLATE,
     "previous_attempt": PREVIOUS_ATTEMPT_TEMPLATE,
     "top_program": TOP_PROGRAM_TEMPLATE,
+    "inspirations_section": INSPIRATIONS_SECTION_TEMPLATE,
+    "inspiration_program": INSPIRATION_PROGRAM_TEMPLATE,
     "evaluation": EVALUATION_TEMPLATE,
 }

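To see what these two templates produce end to end, a small rendering sketch; the template strings are copied from the diff above, and the sample values are made up.

````python
# Local copies of the two templates added in the diff
INSPIRATIONS_SECTION_TEMPLATE = """## Inspiration Programs

These programs represent diverse approaches and creative solutions that may inspire new ideas:

{inspiration_programs}
"""

INSPIRATION_PROGRAM_TEMPLATE = """### Inspiration {program_number} (Score: {score}, Type: {program_type})
```{language}
{program_snippet}
```
Unique approach: {unique_features}
"""

# Render one inspiration entry, then wrap it in the section template
program = INSPIRATION_PROGRAM_TEMPLATE.format(
    program_number=1,
    score=f"{0.7321:.4f}",
    program_type="Alternative",
    language="python",
    program_snippet="def solve(xs):\n    return sorted(xs)",
    unique_features="Concise implementation",
)
print(INSPIRATIONS_SECTION_TEMPLATE.format(inspiration_programs=program.strip()))
````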
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "openevolve"
-version = "0.0.7"
+version = "0.0.8"
 description = "Open-source implementation of AlphaEvolve"
 readme = "README.md"
 requires-python = ">=3.9"
2 changes: 1 addition & 1 deletion setup.py
@@ -2,7 +2,7 @@

 setup(
     name="openevolve",
-    version="0.0.7",
+    version="0.0.8",
     packages=find_packages(),
     include_package_data=True,
 )