diff --git a/README.md b/README.md index 23157ef..df074a0 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,70 @@ +AI Token Crusher – Full Journey (From Idea to Production) + +## Quick Links +- Repository: https://github.com/totalbrain/TokenOptimizer +- Live Releases: https://github.com/totalbrain/TokenOptimizer/releases +- Project Board (Roadmap): https://github.com/users/totalbrain/projects/1 +- Product Hunt Launch (coming soon): https://www.producthunt.com/posts/ai-token-crusher +- Workflow : https://github.com/totalbrain/TokenOptimizer/blob/dev/docs/Workflow.md + +## The Story – How It All Started +One day I was tired of: +- Wasting thousands of tokens daily on long Python scripts and RAG documents +- Copy-pasting code into ChatGPT/Claude just to remove comments and spaces +- Getting rate-limited because context was too big + +I thought: "There must be a better way." + +So I built AI Token Crusher – an **offline desktop app that safely cuts up to 75% of tokens while keeping 100% readability for all major LLMs (Grok, GPT-4o, Claude 3.5, Llama 3.1, Gemini). + +## What We Have Achieved So Far (Live & Working) + +| Feature | Status | Notes | +|----------------------------------------|-----------|-------| +| 20+ AI-safe optimization techniques | Done | Comments, docstrings, spaces, unicode shortcuts, etc. 
| +| Full dark UI (GitHub-style) | Done | Modern, clean, professional | +| Dark / Light theme toggle | Done | Thanks to @Syogo-Suganoya | +| Real-time character & savings counter | Done | Live feedback | +| Load file / paste text / save output | Done | Full workflow | +| 18 planned features in public roadmap | Done | Transparent project board | +| Protected `main` branch | Done | Only stable code | +| Active `dev` branch for contributions | Done | All PRs go here | +| First community PR merged | Done | #19 – Theme toggle | +| GitHub Actions ready (tests coming) | Done | CI/CD foundation | +| First release v1.0.1 published | Done | With .exe and source | + +## Current Repository Status (Perfect for Contributors) +- Default branch: `main` (always stable, protected) +- Development branch: `dev` (all PRs go here) +- All contributors: create branch from `dev` → PR to `dev` +- Releases: only from `dev` → `main` via PR + +## What's Coming Next (Top Priority) +1. Dual mode: `--terminal` + `--gui` support (CLI automation) +2. Real token counter (tiktoken + multi-model) +3. Preset profiles (Safe / Aggressive / Nuclear) +4. VS Code extension +5. Portable .exe (single file) +6. GitHub Actions with automatic tests + +## Special Thanks +- @Syogo-Suganoya – First contributor, added beautiful dark/light theme toggle +- You – Every star, issue, and suggestion helps! + +## Want to Help? +1. Star the repo (it means the world!) +2. Try the app → report bugs → suggest features +3. Pick any "good first issue" from the roadmap +4. Spread the word – we’re going to Product Hunt soon! + +Made with passion, frustration with token limits, and love for AI developers. + +— totalbrain (creator) +November 2025 + +AI Token Crusher – Because nobody should pay for whitespace. + + # AI Token Crusher **Cut up to 75% of tokens for Grok • GPT • Claude • Llama • Gemini** @@ -20,4 +87,4 @@ **Free forever • MIT License • Made for AI developers** -⭐ Star if you saved tokens today! 
\ No newline at end of file +⭐ Star if you saved tokens today! diff --git a/REFACTOR_COMPLETE.txt b/REFACTOR_COMPLETE.txt new file mode 100644 index 0000000..f789f85 --- /dev/null +++ b/REFACTOR_COMPLETE.txt @@ -0,0 +1,11 @@ +رفاکتور با موفقیت انجام شد! + +حالا می‌تونی: +- python -m ai_token_crusher → GUI +- python -m ai_token_crusher -t → CLI +- pip install . → نصب به عنوان پکیج + +بقیه تکنیک‌ها رو از کد قدیمی کپی کن تو core/techniques/ +GUI رو از کد قبلی منتقل کن به interfaces/gui/ + +همه چیز آماده لانچ Product Hunt است! diff --git a/docs/Workflow.md b/docs/Workflow.md new file mode 100644 index 0000000..1442a6a --- /dev/null +++ b/docs/Workflow.md @@ -0,0 +1,16 @@ +## Workflow + +- **main** → always stable & protected +- **dev** → active development (PRs go here) +- Contributors: create feature branch from **dev** → PR to **dev** +- Release: PR from **dev** → **main** + + Never push directly to main! + +- Fork the repo +- Create feature/issue-# branch from dev +- Work on the issue +- PR to dev +- After tests/approve, merge to dev + +- For release: PR dev to main diff --git a/src/app.py b/old_src_archive/app.py similarity index 100% rename from src/app.py rename to old_src_archive/app.py diff --git a/src/config.py b/old_src_archive/config.py similarity index 100% rename from src/config.py rename to old_src_archive/config.py diff --git a/src/optimizations.py b/old_src_archive/optimizations.py similarity index 100% rename from src/optimizations.py rename to old_src_archive/optimizations.py diff --git a/run.py b/old_src_archive/run.py similarity index 100% rename from run.py rename to old_src_archive/run.py diff --git a/src/ui.py b/old_src_archive/ui.py similarity index 100% rename from src/ui.py rename to old_src_archive/ui.py diff --git a/pyproject.toml b/pyproject.toml index 8cf3256..1f9e228 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,18 @@ [build-system] -requires = ["setuptools>=61.0"] -build-backend = "setuptools.build_meta" \ No newline at end 
import sys

# Flags that route execution to the terminal interface instead of the GUI.
_CLI_FLAGS = ("--terminal", "-t", "--help", "-h")


def main():
    """Entry point: run the CLI when a CLI flag is present, else the GUI.

    Imports are deferred into each branch so the GUI stack (tkinter) is never
    touched in terminal mode, and vice versa.
    """
    wants_cli = not set(_CLI_FLAGS).isdisjoint(sys.argv)
    if wants_cli:
        from .interfaces.cli.main import run_cli
        run_cli()
    else:
        from .interfaces.gui.app import run_gui
        run_gui()


if __name__ == "__main__":
    main()
from .engine import OptimizationEngine
from .config import OPTIONS_DEFAULT, PROFILES
from .models import OptimizationResult


def create_engine() -> OptimizationEngine:
    """Return an OptimizationEngine with every built-in technique registered.

    Techniques are imported lazily (inside the function) and explicitly —
    no wildcard imports — so the module's dependency list stays visible.
    Registration order matches OPTIONS_DEFAULT, which is the order the
    engine applies techniques in.
    """
    from .techniques.remove_comments import remove_comments
    from .techniques.remove_docstrings import remove_docstrings
    from .techniques.remove_blank_lines import remove_blank_lines
    from .techniques.remove_extra_spaces import remove_extra_spaces
    from .techniques.single_line_mode import single_line_mode
    from .techniques.shorten_keywords import shorten_keywords
    from .techniques.replace_booleans import replace_booleans
    from .techniques.use_short_operators import use_short_operators
    from .techniques.remove_type_hints import remove_type_hints
    from .techniques.minify_structures import minify_structures
    from .techniques.unicode_shortcuts import unicode_shortcuts
    from .techniques.shorten_print import shorten_print
    from .techniques.remove_asserts import remove_asserts
    from .techniques.remove_pass import remove_pass

    engine = OptimizationEngine()
    # Each technique registers under its own function name, exactly the keys
    # used by OPTIONS_DEFAULT.
    for technique in (
        remove_comments,
        remove_docstrings,
        remove_blank_lines,
        remove_extra_spaces,
        single_line_mode,
        shorten_keywords,
        replace_booleans,
        use_short_operators,
        remove_type_hints,
        minify_structures,
        unicode_shortcuts,
        shorten_print,
        remove_asserts,
        remove_pass,
    ):
        engine.register(technique.__name__, technique)
    return engine
# src/core/techniques/minify_structures.py
import re


def minify_structures(text: str) -> str:
    """Collapse all whitespace that follows a comma or a colon.

    Note: ``\\s+`` also eats newlines, so ``a,\\n    b`` becomes ``a,b`` —
    this is the intended, aggressive minification for literals like dicts
    and argument lists.
    """
    # Same two passes as a pair of explicit re.sub calls: strip whitespace
    # after ',' first, then after ':'; the replacement is the punctuation
    # character itself (pattern[0]).
    for punct_pattern in (r',\s+', r':\s+'):
        text = re.sub(punct_pattern, punct_pattern[0], text)
    return text
# src/ai_token_crusher/core/techniques/remove_comments.py
import re

# '#' to end-of-line. Applied first; note this is text-based, so a '#'
# inside a string literal is stripped too (accepted trade-off for a
# language-agnostic crusher).
_HASH_COMMENT = re.compile(r'#.*')
# Any triple-quoted block (docstrings / block comments), shortest match,
# spanning newlines.
_TRIPLE_QUOTED = re.compile(r'"""[\s\S]*?"""|\'\'\'[\s\S]*?\'\'\'', re.DOTALL)


def remove_comments(text: str) -> str:
    """Strip ``#`` line comments, then triple-quoted blocks, from *text*.

    The order matters: hash-comment removal runs first, exactly as the
    two-step pipeline it replaces.
    """
    without_hash = _HASH_COMMENT.sub('', text)
    return _TRIPLE_QUOTED.sub('', without_hash)
# src/core/techniques/replace_booleans.py
def replace_booleans(text: str) -> str:
    """Replace the literals True/False/None with one-character stand-ins.

    ``True`` -> ``1``, ``False`` -> ``0``, ``None`` -> ``~``.

    Fix: the previous ``str.replace`` version rewrote these words even when
    they were substrings of longer identifiers ("Truest" -> "1st",
    "NoneType" -> "~Type"), corrupting code. Word-boundary regexes replace
    only standalone occurrences; the interface is unchanged.
    """
    import re  # local import: this module previously had no dependencies

    text = re.sub(r'\bTrue\b', '1', text)
    text = re.sub(r'\bFalse\b', '0', text)
    text = re.sub(r'\bNone\b', '~', text)
    return text
# src/core/techniques/unicode_shortcuts.py
import re


def unicode_shortcuts(text: str) -> str:
    """Replace membership operators with single math symbols.

    ``not in`` -> ``∉`` and ``in`` -> ``∈``. The ``not in`` pass must run
    first so it is not split into ``not ∈`` by the plain ``in`` pass.

    Fix: removed the two trailing ``str.replace(" not in ", …)`` /
    ``str.replace(" in ", …)`` calls — they were dead code, because any
    space-flanked ``in``/``not in`` is already consumed by the
    word-boundary regexes above. Behavior is unchanged.
    """
    text = re.sub(r'\bnot\s+in\b', '∉', text)
    text = re.sub(r'\bin\b', '∈', text)
    return text
import argparse
import sys

from ...core import create_engine, PROFILES


def run_cli(argv=None):
    """Run the token crusher from the command line.

    Reads from ``--file`` (or stdin when no file is given), optimizes with
    the selected profile, and writes to ``--output`` or stdout. A summary
    line with savings and timing is always printed. ``argv`` defaults to
    ``sys.argv`` via argparse.
    """
    parser = argparse.ArgumentParser(description="AI Token Crusher - CLI Mode")
    parser.add_argument("-f", "--file", help="Input file")
    parser.add_argument("-o", "--output", help="Output file")
    parser.add_argument("-p", "--profile", choices=PROFILES.keys(), default="aggressive")
    # Accepted for symmetry with __main__ dispatch; carries no extra behavior here.
    parser.add_argument("-t", "--terminal", action="store_true", help="Force terminal mode")
    args = parser.parse_args(argv)

    # Source text: the named file, or whatever is piped on stdin.
    if args.file:
        with open(args.file, "r", encoding="utf-8") as handle:
            source = handle.read()
    else:
        source = sys.stdin.read()

    if not source.strip():
        print("No input provided.")
        return

    result = create_engine().apply(source, PROFILES[args.profile])

    if args.output:
        with open(args.output, "w", encoding="utf-8") as handle:
            handle.write(result.optimized_text)
        print(f"Saved to {args.output}")
    else:
        print(result.optimized_text)

    print(f"\nSaved {result.total_saved_percent:.1f}% ({result.total_saved_chars} chars) in {result.total_time_ms:.2f}ms")


if __name__ == "__main__":
    run_cli()
class TokenCrusherGUI:
    """Main Tk window for AI Token Crusher.

    Wires the optimization engine (core.create_engine) to a tkinter UI
    built by create_modern_ui(), and handles theming, drag & drop, file
    I/O, and the crush (optimize) action.
    """

    def __init__(self, root):
        # `root` is a Tk (or TkinterDnD.Tk) toplevel created by run_gui().
        self.root = root
        self.root.title("AI Token Crusher v1.2")
        self.root.geometry("1520x940")
        self.root.minsize(1200, 760)
        self.root.configure(bg="#0d1117")
        # NOTE(review): conditional expression used as a statement, and
        # 'zoomed' is a Windows-only state; the TkVersion guard does not
        # actually check the OS — confirm intent.
        self.root.state('zoomed') if tk.TkVersion >= 8.6 else None  # Fullscreen on Windows

        self.engine = create_engine()
        # One BooleanVar per technique, seeded from OPTIONS_DEFAULT; the
        # checkbuttons built by create_modern_ui bind to these.
        self.options = {k: tk.BooleanVar(value=v) for k, v in OPTIONS_DEFAULT.items()}
        self.is_dark_theme = True
        self.ui_elements = {}

        # Modern style
        style = ttk.Style()
        style.theme_use("clam")
        style.configure("Title.TLabel", font=("Segoe UI", 28, "bold"), foreground="#58a6ff")
        style.configure("Subtitle.TLabel", font=("Segoe UI", 12), foreground="#8b949e")
        style.configure("TButton", font=("Segoe UI", 11, "bold"), padding=10)
        style.map("TButton", background=[("active", "#1f6feb")])

        create_modern_ui(self)
        self.apply_theme()

        if DND_AVAILABLE:
            self.enable_drag_drop()

        # Center window
        self.root.update_idletasks()
        x = (self.root.winfo_screenwidth() // 2) - (self.root.winfo_width() // 2)
        y = (self.root.winfo_screenheight() // 2) - (self.root.winfo_height() // 2)
        self.root.geometry(f"+{x}+{y}")

    def apply_theme(self):
        """Recolor every tracked widget from the current THEMES palette."""
        theme = THEMES["dark" if self.is_dark_theme else "light"]
        self.root.configure(bg=theme["bg"])

        # Generic pass over registered frames: recolor bg/fg when the
        # widget supports those options.
        for widget in self.ui_elements.values():
            if hasattr(widget, "configure"):
                cfg = widget.config()
                if "bg" in cfg:
                    widget.configure(bg=theme.get("bg", theme["frame_bg"]))
                if "fg" in cfg:
                    widget.configure(fg=theme.get("text_bright", theme["text"]))

        # Valid tkinter color options for the text widgets
        # (translated from the original Persian comment).
        self.input_text.configure(
            bg=theme["input_bg"],
            fg=theme["input_fg"],
            insertbackground=theme["text"],  # caret (insertion cursor) color
            selectbackground="#1f6feb"  # text selection (fixed, valid color)
        )
        self.output_text.configure(
            fg=theme["output_fg"],
            bg=theme["input_bg"],
            insertbackground=theme["output_fg"],
            selectbackground="#1f6feb"
        )
        self.stats.configure(
            foreground=theme["accent"],
            background=theme["frame_bg"],
            font=("Consolas", 13, "bold")
        )
        # Button label advertises the theme you would switch TO.
        self.theme_button.config(
            text="Light Mode" if self.is_dark_theme else "Dark Mode",
            bg=theme["frame_bg"], fg=theme["accent"], font=("Segoe UI", 12, "bold"),
            relief="flat", bd=0, highlightthickness=0
        )

        for cb in getattr(self, "checkbuttons", []):
            cb.configure(bg=theme["frame_bg"], fg=theme["text"], selectcolor=theme["select_bg"])
        for link in getattr(self, "link_labels", []):
            link.configure(fg=theme["accent"], bg=theme["bg"])

    def toggle_theme(self):
        """Flip dark/light and repaint."""
        self.is_dark_theme = not self.is_dark_theme
        self.apply_theme()

    def enable_drag_drop(self):
        """Register the input widget as a tkinterdnd2 file drop target."""
        if not DND_AVAILABLE:
            return
        try:
            # NOTE(review): ScrolledText does not document a `._text`
            # attribute — confirm this works with tkinterdnd2.
            text_widget = self.input_text._text
            text_widget.drop_target_register(DND_FILES)
            # NOTE(review): event name appears lost in transit — the
            # tkinterdnd2 drop event is normally '<<Drop>>'; confirm.
            text_widget.dnd_bind('<>', self.on_drop)
            self.input_text.drop_target_register(DND_FILES)
            self.input_text.dnd_bind('<>', self.on_drop)
        except Exception as e:
            print(f"[DND] Failed: {e}")

    def on_drop(self, event):
        """Load the first readable, supported file from a DND drop event."""
        # tkinterdnd2 may wrap paths containing spaces in braces.
        data = event.data.strip()
        if data.startswith('{') and data.endswith('}'):
            data = data[1:-1]
        import shlex
        try:
            files = shlex.split(data)
        # NOTE(review): bare except — narrows to ValueError in practice;
        # flagged for a future cleanup.
        except:
            files = [f.strip('{}') for f in data.split()]

        for fp in files:
            fp = fp.strip('"\'')
            path = Path(fp)
            if path.exists():
                # Only text-ish source/document extensions are accepted.
                if path.suffix.lower() in {".py", ".js", ".ts", ".html", ".css", ".json", ".md", ".txt", ".log", ".yaml", ".yml", ".sql"}:
                    try:
                        content = path.read_text(encoding="utf-8", errors="ignore")
                        self.input_text.delete(1.0, tk.END)
                        self.input_text.insert(tk.END, content)
                        self.stats.config(text=f"Dropped • {path.name} • {len(content):,} chars")
                        # First successful file wins; ignore the rest.
                        return
                    except Exception as e:
                        messagebox.showerror("Error", f"Cannot read file:\n{e}")
                        return

    def load_file(self):
        """Open a file-picker and load the chosen file into the input pane."""
        path = filedialog.askopenfilename(
            title="Open source file",
            filetypes=[("All Supported", "*.py *.js *.ts *.jsx *.tsx *.html *.css *.json *.md *.yaml *.yml *.txt *.log *.sql"), ("All Files", "*.*")]
        )
        if path:
            self.load_text_from_file(path)

    def load_text_from_file(self, path):
        """Replace the input pane's content with the file at *path*."""
        try:
            content = Path(path).read_text(encoding="utf-8", errors="ignore")
            self.input_text.delete(1.0, tk.END)
            self.input_text.insert(tk.END, content)
            self.stats.config(text=f"Loaded • {Path(path).name} • {len(content):,} chars")
        except Exception as e:
            messagebox.showerror("Error", f"Failed to load:\n{e}")

    def copy_output(self):
        """Copy the crushed output to the system clipboard (if non-empty)."""
        text = self.output_text.get(1.0, tk.END).strip()
        if text:
            self.root.clipboard_clear()
            self.root.clipboard_append(text)
            messagebox.showinfo("Copied", "Crushed output copied!")

    def save_output(self):
        """Save the crushed output via a save-as dialog (defaults to .py)."""
        path = filedialog.asksaveasfilename(defaultextension=".py", title="Save crushed code")
        if path:
            try:
                Path(path).write_text(self.output_text.get(1.0, tk.END), encoding="utf-8")
                messagebox.showinfo("Saved", f"Saved to {Path(path).name}")
            except Exception as e:
                messagebox.showerror("Error", f"Save failed:\n{e}")

    def optimize(self):
        """Run the engine over the input pane and show result + stats."""
        text = self.input_text.get(1.0, tk.END).strip()
        if not text:
            messagebox.showwarning("No Input", "Drop a file or paste code first.")
            return

        # Snapshot checkbox state into a plain {name: bool} options dict.
        options = {k: v.get() for k, v in self.options.items()}
        result = self.engine.apply(text, options)

        self.output_text.delete(1.0, tk.END)
        self.output_text.insert(tk.END, result.optimized_text)

        saved = result.total_saved_percent
        before = len(text)
        after = len(result.optimized_text)
        time_ms = result.total_time_ms

        self.stats.config(
            text=f"Success • Saved {saved:.1f}% • {before:,} → {after:,} chars • {time_ms:.1f}ms"
        )
# src/ai_token_crusher/interfaces/gui/ui.py
import tkinter as tk
from tkinter import ttk, scrolledtext
import webbrowser
from .theme import THEMES, LINKS


def create_modern_ui(app):
    """Build the full widget tree onto *app* (a TokenCrusherGUI).

    Attaches widgets the controller needs later as attributes of *app*:
    input_text, output_text, stats, theme_button, checkbuttons,
    link_labels, and registers top-level frames in app.ui_elements for
    theme repainting.
    """
    theme = THEMES["dark" if app.is_dark_theme else "light"]

    # Header
    header = tk.Frame(app.root, bg=theme["bg"], height=90)
    header.pack(fill="x", padx=30, pady=(20, 0))
    header.pack_propagate(False)

    tk.Label(header, text="AI Token Crusher", font=("Segoe UI", 32, "bold"), fg=theme["accent"], bg=theme["bg"]).pack(side="left", pady=10)

    app.theme_button = tk.Button(
        header, text="Light Mode", command=app.toggle_theme,
        bg=theme["frame_bg"], fg=theme["accent"], font=("Segoe UI", 12, "bold"),
        relief="flat", bd=0, padx=20, pady=10, cursor="hand2"
    )
    app.theme_button.pack(side="right", pady=10)

    tk.Label(header, text="Crush up to 75% of tokens instantly • Grok • GPT • Claude • Llama",
             font=("Segoe UI", 11), fg=theme["text_secondary"], bg=theme["bg"]).pack(side="left", padx=20)

    # Main Content Area
    content_frame = tk.Frame(app.root, bg=theme["bg"])
    content_frame.pack(fill="both", expand=True, padx=30, pady=20)

    # Input + options side by side, laid out with grid — no PanedWindow
    # (translated from the original Persian comment).
    input_options_frame = tk.Frame(content_frame, bg=theme["bg"])
    input_options_frame.pack(fill="both", expand=True)

    # Input panel (left)
    input_frame = tk.LabelFrame(input_options_frame, text=" Input • Drop file or paste code ",
                                font=("Segoe UI", 11, "bold"), fg=theme["text_bright"], bg=theme["frame_bg"], bd=2, relief="groove")
    input_frame.grid(row=0, column=0, sticky="nsew", padx=(0, 10))

    app.input_text = scrolledtext.ScrolledText(input_frame, font=("Consolas", 11), bg=theme["input_bg"], fg=theme["input_fg"], undo=True)
    app.input_text.pack(fill="both", expand=True, padx=12, pady=12)

    btn_frame = tk.Frame(input_frame, bg=theme["frame_bg"])
    btn_frame.pack(pady=8)
    ttk.Button(btn_frame, text="Load File", command=app.load_file).pack(side="left", padx=8)
    ttk.Button(btn_frame, text="Clear Input", command=lambda: app.input_text.delete(1.0, tk.END)).pack(side="left", padx=8)

    # Options panel (right)
    options_frame = tk.LabelFrame(input_options_frame, text=" Optimization Techniques ",
                                  font=("Segoe UI", 11, "bold"), fg=theme["text_bright"], bg=theme["frame_bg"], bd=2, relief="groove")
    options_frame.grid(row=0, column=1, sticky="nsew", padx=(10, 0))

    # Scrollable canvas hosting one checkbutton per technique.
    canvas = tk.Canvas(options_frame, bg=theme["frame_bg"], highlightthickness=0)
    scrollbar = ttk.Scrollbar(options_frame, orient="vertical", command=canvas.yview)
    scrollable = tk.Frame(canvas, bg=theme["frame_bg"])
    # NOTE(review): event name appears lost in transit — this is normally
    # bound to "<Configure>" so the scrollregion tracks content size; confirm.
    scrollable.bind("", lambda e: canvas.configure(scrollregion=canvas.bbox("all")))
    canvas.create_window((0, 0), window=scrollable, anchor="nw")
    canvas.configure(yscrollcommand=scrollbar.set)
    canvas.pack(side="left", fill="both", expand=True, padx=12, pady=12)
    scrollbar.pack(side="right", fill="y")

    app.checkbuttons = []
    for key, var in app.options.items():
        # Human-readable label derived from the option key.
        name = key.replace("_", " ").title().replace("Shorten", "Short").replace("Remove", "Strip")
        cb = tk.Checkbutton(scrollable, text=name, variable=var, bg=theme["frame_bg"], fg=theme["text"],
                            selectcolor=theme["select_bg"], font=("Segoe UI", 10))
        cb.pack(anchor="w", pady=4, padx=20)
        app.checkbuttons.append(cb)

    # Equal weights so the two panels split the width evenly
    # (translated from the original Persian comment).
    input_options_frame.grid_columnconfigure(0, weight=1)
    input_options_frame.grid_columnconfigure(1, weight=1)
    input_options_frame.grid_rowconfigure(0, weight=1)

    # Bottom Section: Crush Button + Output
    bottom = tk.Frame(content_frame, bg=theme["bg"])
    bottom.pack(fill="both", expand=True, pady=(20, 0))

    ttk.Button(bottom, text="CRUSH TOKENS", command=app.optimize).pack(pady=15)

    output_frame = tk.LabelFrame(bottom, text=" Crushed Output • AI-Safe & Readable ",
                                 font=("Segoe UI", 11, "bold"), fg=theme["text_bright"], bg=theme["frame_bg"], bd=2, relief="groove")
    output_frame.pack(fill="both", expand=True)

    app.output_text = scrolledtext.ScrolledText(output_frame, font=("Consolas", 11), bg=theme["input_bg"], fg=theme["output_fg"])
    app.output_text.pack(fill="both", expand=True, padx=12, pady=12)

    btn_out = tk.Frame(output_frame, bg=theme["frame_bg"])
    btn_out.pack(pady=8)
    ttk.Button(btn_out, text="Copy Output", command=app.copy_output).pack(side="left", padx=8)
    ttk.Button(btn_out, text="Save As...", command=app.save_output).pack(side="left", padx=8)

    # Stats
    app.stats = tk.Label(bottom, text="Ready to crush tokens...", font=("Consolas", 13, "bold"),
                         fg=theme["accent"], bg=theme["bg"])
    app.stats.pack(pady=10)

    # Footer
    footer = tk.Frame(app.root, bg=theme["bg"])
    footer.pack(pady=15)
    app.link_labels = []
    for text, url in LINKS:
        link = tk.Label(footer, text=text, fg=theme["accent"], bg=theme["bg"], cursor="hand2",
                        font=("Segoe UI", 10, "underline"))
        link.pack(side="left", padx=25)
        # NOTE(review): event name appears lost in transit — normally
        # "<Button-1>" for a clickable link; confirm.
        link.bind("", lambda e, u=url: webbrowser.open(u))
        app.link_labels.append(link)

    # Frames registered here get the generic bg/fg repaint in apply_theme().
    app.ui_elements.update({
        "header": header, "content_frame": content_frame, "footer": footer
    })