diff --git a/openevolve/config.py b/openevolve/config.py index e01db8697..441a78590 100644 --- a/openevolve/config.py +++ b/openevolve/config.py @@ -79,6 +79,10 @@ class LLMModelConfig: # Reasoning parameters reasoning_effort: Optional[str] = None + # Manual mode (human-in-the-loop) + manual_mode: Optional[bool] = None + _manual_queue_dir: Optional[str] = None + def __post_init__(self): """Post-initialization to resolve ${VAR} env var references in api_key""" self.api_key = _resolve_env_var(self.api_key) @@ -117,6 +121,9 @@ class LLMConfig(LLMModelConfig): # Reasoning parameters (inherited from LLMModelConfig but can be overridden) reasoning_effort: Optional[str] = None + # Manual mode switch + manual_mode: bool = False + def __post_init__(self): """Post-initialization to set up model configurations""" super().__post_init__() # Resolve ${VAR} in api_key at LLMConfig level @@ -171,6 +178,7 @@ def __post_init__(self): "retry_delay": self.retry_delay, "random_seed": self.random_seed, "reasoning_effort": self.reasoning_effort, + "manual_mode": self.manual_mode, } self.update_model_params(shared_config) diff --git a/openevolve/controller.py b/openevolve/controller.py index e4e250222..ae3a1566d 100644 --- a/openevolve/controller.py +++ b/openevolve/controller.py @@ -5,6 +5,7 @@ import asyncio import logging import os +import shutil import signal import time import uuid @@ -85,6 +86,9 @@ def __init__( # Set up logging self._setup_logging() + # Manual mode queue lives in /manual_tasks_queue + self._setup_manual_mode_queue() + # Set random seed for reproducibility if specified if self.config.random_seed is not None: import hashlib @@ -208,6 +212,35 @@ def _setup_logging(self) -> None: logger.info(f"Logging to {log_file}") + def _setup_manual_mode_queue(self) -> None: + """ + Set up manual task queue directory if llm.manual_mode is enabled + + Queue directory is always: + /manual_tasks_queue + + The directory is cleared on controller start so the UI shows only tasks + from the current run (no stale tasks after restart) + """ + if not bool(getattr(self.config.llm, "manual_mode", False)): + return + + qdir = (Path(self.output_dir).expanduser().resolve() / "manual_tasks_queue") + + # Clear stale tasks from previous runs + if qdir.exists(): + shutil.rmtree(qdir) + qdir.mkdir(parents=True, exist_ok=True) + + # Inject runtime-only queue dir into configs + self.config.llm._manual_queue_dir = str(qdir) + for model_cfg in self.config.llm.models: + model_cfg._manual_queue_dir = str(qdir) + for model_cfg in self.config.llm.evaluator_models: + model_cfg._manual_queue_dir = str(qdir) + + logger.info(f"Manual mode enabled. 
Queue dir: {qdir}") + def _load_initial_program(self) -> str: """Load the initial program from file""" with open(self.initial_program_path, "r") as f: diff --git a/openevolve/llm/openai.py b/openevolve/llm/openai.py index 4f86f9bb9..7477e5b34 100644 --- a/openevolve/llm/openai.py +++ b/openevolve/llm/openai.py @@ -1,20 +1,49 @@ """ OpenAI API interface for LLMs + +This module also supports a "manual mode" (human-in-the-loop) where prompts are written +to a task queue directory and the system waits for a corresponding *.answer.json file """ import asyncio +import json import logging import time +import uuid +from datetime import datetime, timezone +from pathlib import Path from typing import Any, Dict, List, Optional, Union import openai -from openevolve.config import LLMConfig from openevolve.llm.base import LLMInterface logger = logging.getLogger(__name__) +def _iso_now() -> str: + return datetime.now(tz=timezone.utc).isoformat() + + +def _build_display_prompt(messages: List[Dict[str, str]]) -> str: + """ + Render messages into a single plain-text prompt for the manual UI. + """ + chunks: List[str] = [] + for m in messages: + role = str(m.get("role", "user")).upper() + content = m.get("content", "") + chunks.append(f"### {role}\n{content}\n") + return "\n".join(chunks).rstrip() + "\n" + + +def _atomic_write_json(path: Path, payload: Dict[str, Any]) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + tmp = path.parent / f".{path.name}.tmp" + tmp.write_text(json.dumps(payload, ensure_ascii=False, indent=2), encoding="utf-8") + tmp.replace(path) + + class OpenAILLM(LLMInterface): """LLM interface using OpenAI-compatible APIs""" @@ -35,15 +64,30 @@ def __init__( self.random_seed = getattr(model_cfg, "random_seed", None) self.reasoning_effort = getattr(model_cfg, "reasoning_effort", None) - # Set up API client - # OpenAI client requires max_retries to be int, not None - max_retries = self.retries if self.retries is not None else 0 - self.client = openai.OpenAI( - api_key=self.api_key, - base_url=self.api_base, - timeout=self.timeout, - max_retries=max_retries, - ) + # Manual mode: enabled via llm.manual_mode in config.yaml + self.manual_mode = (getattr(model_cfg, "manual_mode", False) is True) + self.manual_queue_dir: Optional[Path] = None + + if self.manual_mode: + qdir = getattr(model_cfg, "_manual_queue_dir", None) + if not qdir: + raise ValueError( + "Manual mode is enabled but manual_queue_dir is missing. " + "This should be injected by the OpenEvolve controller." 
+ ) + self.manual_queue_dir = Path(str(qdir)).expanduser().resolve() + self.manual_queue_dir.mkdir(parents=True, exist_ok=True) + self.client = None + else: + # Set up API client (normal mode) + # OpenAI client requires max_retries to be int, not None + max_retries = self.retries if self.retries is not None else 0 + self.client = openai.OpenAI( + api_key=self.api_key, + base_url=self.api_base, + timeout=self.timeout, + max_retries=max_retries, + ) # Only log unique models to reduce duplication if not hasattr(logger, "_initialized_models"): @@ -122,8 +166,9 @@ async def generate_with_context( # Add seed parameter for reproducibility if configured # Skip seed parameter for Google AI Studio endpoint as it doesn't support it + # Seed only makes sense for actual API calls seed = kwargs.get("seed", self.random_seed) - if seed is not None: + if seed is not None and not self.manual_mode: if self.api_base == "https://generativelanguage.googleapis.com/v1beta/openai/": logger.warning( "Skipping seed parameter as Google AI Studio endpoint doesn't support it. " @@ -135,6 +180,12 @@ async def generate_with_context( # Attempt the API call with retries retries = kwargs.get("retries", self.retries) retry_delay = kwargs.get("retry_delay", self.retry_delay) + + # Manual mode: no timeout unless explicitly passed by the caller + if self.manual_mode: + timeout = kwargs.get("timeout", None) + return await self._manual_wait_for_answer(params, timeout=timeout) + timeout = kwargs.get("timeout", self.timeout) for attempt in range(retries + 1): @@ -160,6 +211,9 @@ async def generate_with_context( async def _call_api(self, params: Dict[str, Any]) -> str: """Make the actual API call""" + if self.client is None: + raise RuntimeError("OpenAI client is not initialized (manual_mode enabled?)") + # Use asyncio to run the blocking API call in a thread pool loop = asyncio.get_event_loop() response = await loop.run_in_executor( @@ -170,3 +224,63 @@ async def _call_api(self, params: Dict[str, Any]) -> str: logger.debug(f"API parameters: {params}") logger.debug(f"API response: {response.choices[0].message.content}") return response.choices[0].message.content + + async def _manual_wait_for_answer( + self, params: Dict[str, Any], timeout: Optional[Union[int, float]] + ) -> str: + """ + Manual mode: write a task JSON file and poll for *.answer.json + If timeout is provided, we respect it; otherwise we wait indefinitely + """ + + if self.manual_queue_dir is None: + raise RuntimeError("manual_queue_dir is not initialized") + + task_id = str(uuid.uuid4()) + messages = params.get("messages", []) + display_prompt = _build_display_prompt(messages) + + task_payload: Dict[str, Any] = { + "id": task_id, + "created_at": _iso_now(), + "model": params.get("model"), + "display_prompt": display_prompt, + "messages": messages, + "meta": { + "max_tokens": params.get("max_tokens"), + "max_completion_tokens": params.get("max_completion_tokens"), + "temperature": params.get("temperature"), + "top_p": params.get("top_p"), + "reasoning_effort": params.get("reasoning_effort"), + "verbosity": params.get("verbosity"), + }, + } + + task_path = self.manual_queue_dir / f"{task_id}.json" + answer_path = self.manual_queue_dir / f"{task_id}.answer.json" + + _atomic_write_json(task_path, task_payload) + logger.info(f"[manual_mode] Task enqueued: {task_path}") + + start = time.time() + poll_interval = 0.5 + + while True: + if answer_path.exists(): + try: + data = json.loads(answer_path.read_text(encoding="utf-8")) + except Exception as e: + 
logger.warning(f"[manual_mode] Failed to parse answer JSON for {task_id}: {e}") + await asyncio.sleep(poll_interval) + continue + + answer = str(data.get("answer") or "") + logger.info(f"[manual_mode] Answer received for {task_id}") + return answer + + if timeout is not None and (time.time() - start) > float(timeout): + raise asyncio.TimeoutError( + f"Manual mode timed out after {timeout} seconds waiting for answer of task {task_id}" + ) + + await asyncio.sleep(poll_interval) diff --git a/scripts/manual.py b/scripts/manual.py new file mode 100644 index 000000000..6feb0ca3b --- /dev/null +++ b/scripts/manual.py @@ -0,0 +1,179 @@ +""" +Manual mode UI/API for the OpenEvolve visualizer + +This module exposes: + - GET /manual (manual tasks page) + - GET /manual/api/tasks (list tasks, pending only) + - GET /manual/api/tasks/ + - POST /manual/api/tasks//answer + +Task queue directory is assumed to be: + /manual_tasks_queue + +where run_root corresponds to the OpenEvolve run output directory (typically "openevolve_output") +""" + +import json +from dataclasses import dataclass +from pathlib import Path +from typing import Callable, Dict, List, Optional + +from flask import Blueprint, jsonify, render_template, request + +QUEUE_DIRNAME = "manual_tasks_queue" + + +def _resolve_run_root(path_str: str) -> Path: + """ + Resolve run root from a path that may point to: + - + - /checkpoints + - /checkpoints/checkpoint_123 + - /checkpoint_123 + + Returns: + Path to + """ + p = Path(path_str).expanduser().resolve() + + if p.name.startswith("checkpoint_"): + # .../checkpoints/checkpoint_123 -> run_root is parent of "checkpoints" + if p.parent.name == "checkpoints": + return p.parent.parent + # .../checkpoint_123 -> run_root is parent + return p.parent + + if p.name == "checkpoints": + return p.parent + + return p + + +def _queue_dir(run_root: Path) -> Path: + return run_root / QUEUE_DIRNAME + + +@dataclass +class TaskItem: + id: str + created_at: str + model: Optional[str] + has_answer: bool + + +def _list_tasks(qdir: Path) -> List[TaskItem]: + if not qdir.exists(): + return [] + + tasks: List[TaskItem] = [] + + for p in sorted(qdir.glob("*.json")): + # Ignore hidden and non-task JSON files + if p.name.startswith("."): + continue + if p.name.endswith(".answer.json"): + continue + + task_id = p.stem + answer = qdir / f"{task_id}.answer.json" + has_answer = answer.exists() + + created_at = "" + model = None + try: + data = json.loads(p.read_text(encoding="utf-8")) + created_at = str(data.get("created_at") or "") + model = data.get("model") + except Exception: + pass + + tasks.append(TaskItem(id=task_id, created_at=created_at, model=model, has_answer=has_answer)) + + # Show only pending + tasks = [t for t in tasks if not t.has_answer] + return tasks + + +def _read_task(qdir: Path, task_id: str) -> Optional[Dict]: + p = qdir / f"{task_id}.json" + if not p.exists(): + return None + try: + return json.loads(p.read_text(encoding="utf-8")) + except Exception: + return None + + +def _normalize_newlines(text: str) -> str: + """ + Normalize CRLF/CR newlines to LF + + This makes diff parsing deterministic because OpenEvolve diff regexes use '\n' + """ + return text.replace("\r\n", "\n").replace("\r", "\n").rstrip() + + +def _write_answer(qdir: Path, task_id: str, answer_text: str) -> None: + qdir.mkdir(parents=True, exist_ok=True) + out = qdir / f"{task_id}.answer.json" + tmp = qdir / f".{task_id}.answer.json.tmp" + + answer_text = _normalize_newlines(answer_text) + + payload = {"id": task_id, "answer": answer_text} + 
tmp.write_text(json.dumps(payload, ensure_ascii=False, indent=2), encoding="utf-8") + tmp.replace(out) + + +def create_manual_blueprint(get_visualizer_path: Callable[[], str]) -> Blueprint: + bp = Blueprint("manual", __name__, url_prefix="/manual") + + @bp.route("", methods=["GET"], strict_slashes=False) + @bp.route("/", methods=["GET"], strict_slashes=False) + def manual_page(): + return render_template("manual_page.html") + + @bp.get("/api/tasks") + def api_tasks(): + run_root = _resolve_run_root(get_visualizer_path()) + qdir = _queue_dir(run_root) + items = _list_tasks(qdir) + data = [{"id": t.id, "created_at": t.created_at, "model": t.model} for t in items] + return jsonify({"tasks": data}) + + @bp.get("/api/tasks/") + def api_task_detail(task_id: str): + run_root = _resolve_run_root(get_visualizer_path()) + qdir = _queue_dir(run_root) + data = _read_task(qdir, task_id) + if data is None: + return ("Task not found", 404) + + return jsonify({ + "id": data.get("id"), + "created_at": data.get("created_at"), + "model": data.get("model"), + "display_prompt": data.get("display_prompt", ""), + }) + + @bp.post("/api/tasks//answer") + def api_task_answer(task_id: str): + run_root = _resolve_run_root(get_visualizer_path()) + qdir = _queue_dir(run_root) + + # Accept form POST (current UI), optionally JSON too. + answer = (request.form.get("answer") or "").strip() + if not answer and request.is_json: + body = request.get_json(silent=True) or {} + answer = str(body.get("answer") or "").strip() + + if not answer: + return ("Answer must not be empty", 400) + + if not (qdir / f"{task_id}.json").exists(): + return ("Task not found", 404) + + _write_answer(qdir, task_id, answer) + return jsonify({"ok": True}) + + return bp diff --git a/scripts/static/css/manual.css b/scripts/static/css/manual.css new file mode 100644 index 000000000..22e578783 --- /dev/null +++ b/scripts/static/css/manual.css @@ -0,0 +1,249 @@ +:root { + --bg: #0b0d10; + --card: #151a21; + --muted: #8b95a7; + --text: #e7ecf3; + --accent: #4e9eff; + --border: #2a3340; + --surface: #ffffff; +} + +* { box-sizing: border-box; } +html, body { height: 100%; } +body { + margin: 0; + background: var(--bg); + color: var(--text); + font: 14px/1.5 ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Helvetica, Arial; +} + +.topbar { + display: flex; align-items: center; justify-content: space-between; + padding: 14px 16px; border-bottom: 1px solid var(--border); + background: rgba(255,255,255,0.02); position: sticky; top: 0; backdrop-filter: blur(6px); +} +.topbar h1 { margin: 0; font-size: 16px; } + +.container { padding: 18px; max-width: 1000px; margin: 0 auto; } + +.task-list { display: grid; gap: 12px; } +.task-card { + background: var(--card); border: 1px solid var(--border); + border-radius: 12px; padding: 12px; + display: flex; align-items: center; justify-content: space-between; +} +.task-meta { display: flex; gap: 12px; align-items: baseline; } +.task-id { font-family: ui-monospace, SFMono-Regular, Menlo, Consolas, monospace; color: var(--muted); } +.task-created { color: var(--muted); font-size: 12px; } +.task-model { color: var(--text); font-weight: 600; } + +button { + border: 1px solid var(--border); + background: transparent; + color: var(--text); + padding: 8px 12px; + border-radius: 10px; + cursor: pointer; +} +button.primary { + background: var(--accent); + color: #00132b; + border-color: transparent; +} +button.ghost { opacity: 0.9; } +button:hover { filter: brightness(1.05); } +button:active { transform: translateY(1px); } 
+ +.overlay { + position: fixed; inset: 0; background: rgba(0,0,0,0.55); z-index: 20; +} + +/* You kept absolute fullscreen modal; keeping it */ +.modal { + position: fixed; + top: 0; bottom: 0; left: 0; right: 0; + height: 100%; width: 100%; + background: var(--surface); + color: #0c1117; + z-index: 30; + display: flex; flex-direction: column; + box-shadow: 0 16px 80px rgba(0,0,0,0.45); + overflow: hidden; /* keep inner scrollbars inside */ +} + +.hidden { display: none; } + +.icon-btn { + background: transparent; border: none; color: #111827; cursor: pointer; +} +.close-btn { + position: absolute; top: 14px; right: 14px; width: 36px; height: 36px; + display: grid; place-items: center; border-radius: 9px; padding: 0; +} +.close-btn:hover { background: rgba(0,0,0,0.06); } + +.modal-body { + position: absolute; + top: 14px; left: 14px; right: 64px; bottom: 14px; + height: 100%; + min-height: 0; /* critical for inner scrollers */ +} + +/* Panes in two columns using absolute math as you prefer */ +.pane { + position: absolute; + background: #f7f8fa; + border: 1px solid #e6e8ee; + border-radius: 12px; + min-height: 0; /* allow inner scrollers to work */ + width: calc(50% - 14px); + height: calc(100% - 14px); + display: flex; flex-direction: column; +} + +.left { right: calc(50% + 5px); } +.right { left: calc(50% + 5px); } + +.pane-title { + font-weight: 700; font-size: 12px; color: #4b5563; padding: 10px 8px; +} + +/* Monaco containers fill the available pane height */ +.editor-host { + position: absolute; + /* top: 42px; below .pane-title (~32 + padding) */ + /* top: 6px !important; */ + left: 10px; + right: 10px; + bottom: 52px; /* leave room for bottom buttons */ + border: 1px solid #1f2937; + border-radius: 8px; + overflow: hidden; /* Monaco manages inner scrollbars */ +} + +.right .editor-host { + top: 6px !important; +} + +/* Right pane puts a placeholder atop the editor until there's content */ +.editor-wrapper { position: relative; height: calc(100% - 42px); } + +/* buttons stay where you had them */ +.copy-btn { + position: absolute; right: 12px; bottom: 12px; + background: #eef3ff; color: #0c2050; border-color: #d8e2ff; +} +.complete-btn { + position: absolute; right: 12px; bottom: 12px; +} + +/* Make the text cursor appear when hovering editors (I-beam) */ +#promptEditor, #answerEditor { cursor: text; } + +/* Mobile adjustments still apply via your existing media queries */ + +/* ---------- Code viewers (both sides) ---------- */ +.code-viewer { + position: relative; + flex: 1; + border-radius: 8px; + border: 1px solid #1f2937; + background: #0f1720; + color: #e6edf3; + font-family: ui-monospace, SFMono-Regular, Menlo, Consolas, monospace; + line-height: 1.5; + padding: 12px; + overflow: auto; + counter-reset: line; + user-select: text; /* allow text selection */ + cursor: text; /* I-beam */ + outline: none; /* we provide our own focus style */ +} + +/* Focus ring for accessibility */ +.code-viewer:focus-visible { + box-shadow: 0 0 0 2px rgba(78,158,255,0.75) inset; +} + +/* Blue theme for the editor side */ +.code-viewer.editor { + background: linear-gradient(180deg, #0f1720 0%, #0e1b2e 100%); +} + +/* Readonly just changes caret behavior */ +.code-viewer.readonly { caret-color: transparent; } + +/* --- Line layout: works for read-only (.line elements) and editable (> div lines) --- */ +.code-viewer .line, +.code-viewer.editor > div { + display: block; + white-space: pre; + padding-left: 52px; + position: relative; +} + +/* line numbers */ +.code-viewer .line::before, 
+.code-viewer.editor > div::before { + counter-increment: line; + content: counter(line); + position: absolute; left: 10px; width: 32px; + text-align: right; color: #7a8696; +} + +/* active line highlight */ +.code-viewer .line.active, +.code-viewer.editor > div.active { + background: rgba(78,158,255,0.15); +} + +/* placeholder for editor when empty */ +.code-viewer.editor:empty::before { + content: attr(data-placeholder); + color: #7a8696; opacity: 0.7; + padding-left: 52px; + display: block; +} + +/* Copy/Complete buttons */ +.copy-btn { + position: absolute; right: 12px; bottom: 12px; + background: #eef3ff; color: #0c2050; border-color: #d8e2ff; +} +.complete-btn { + position: absolute; right: 12px; bottom: 12px; +} + +/* mobile */ +@media (max-width: 900px) { + .modal { inset: 0; } + .modal-body { left: 8px; right: 8px; } + .pane { width: calc(100% - 0px); left: 0; right: 0; } + .left { top: 0; height: calc(50% - 8px); } + .right { bottom: 0; height: calc(50% - 8px); } +} + + +/* Monaco containers fill the available pane height */ +.editor-host { + position: absolute; + top: 42px; /* below .pane-title */ + left: 10px; + right: 10px; + bottom: 52px; /* leave room for bottom buttons */ + border: 1px solid #1f2937; + border-radius: 8px; + overflow: hidden; +} + +.editor-wrapper { position: relative; height: calc(100% - 42px); } + +/* buttons stay where you had them */ +.copy-btn { + position: absolute; right: 12px; bottom: 12px; + background: #eef3ff; color: #0c2050; border-color: #d8e2ff; +} +.complete-btn { position: absolute; right: 12px; bottom: 12px; } + +/* I-beam cursor when hovering editors */ +#promptEditor, #answerEditor { cursor: text; } diff --git a/scripts/static/js/manual.js b/scripts/static/js/manual.js new file mode 100644 index 000000000..db88d4cff --- /dev/null +++ b/scripts/static/js/manual.js @@ -0,0 +1,172 @@ +const $ = (sel) => document.querySelector(sel); + +const listEl = $("#taskList"); +const overlay = $("#modalOverlay"); +const modal = $("#taskModal"); +const closeModalBtn = $("#closeModal"); +const refreshBtn = $("#refreshBtn"); +const copyBtn = $("#copyBtn"); +const completeBtn = $("#completeBtn"); + +const promptHost = $("#promptEditor"); +const answerHost = $("#answerEditor"); + +let currentTaskId = null; +let currentPromptText = ""; + +let monacoLoaded = false; +let promptEditor = null; +let answerEditor = null; + +// Relative base: when opened at /manual, "api/..." resolves to /manual/api/... +const API_BASE = `${window.location.pathname.replace(/\/$/, "")}/api`; + +/* ---------------- Monaco loader ---------------- */ +function loadMonaco() { + return new Promise((resolve, reject) => { + if (monacoLoaded && window.monaco) return resolve(); + if (!window.require) return reject(new Error("Monaco AMD loader not found")); + window.require.config({ + paths: { vs: "https://cdn.jsdelivr.net/npm/monaco-editor@0.52.0/min/vs" }, + }); + window.require(["vs/editor/editor.main"], () => { + monacoLoaded = true; + resolve(); + }); + }); +} + +/* ---------------- Helpers ---------------- */ +async function fetchJSON(url) { + const r = await fetch(url, { cache: "no-store" }); + if (!r.ok) throw new Error(await r.text()); + return r.json(); +} + +function taskCardHtml(t) { + const short = t.id.slice(0, 8); + return ` +
+      <div class="task-card" data-id="${t.id}">
+        <div class="task-meta">
+          <span class="task-model">${t.model || "model: n/a"}</span>
+          <span class="task-id">#${short}</span>
+          <span class="task-created">${t.created_at || ""}</span>
+        </div>
+        <button class="provide-btn primary">Provide answer</button>
+      </div>
+  `;
+}
+
+async function loadTasks() {
+  const data = await fetchJSON(`${API_BASE}/tasks`);
+  listEl.innerHTML = data.tasks.length
+    ? data.tasks.map(taskCardHtml).join("")
+    : `
No pending tasks.
`; + + document.querySelectorAll(".provide-btn").forEach((btn) => { + btn.addEventListener("click", async (e) => { + const card = e.target.closest(".task-card"); + const taskId = card.getAttribute("data-id"); + await openTask(taskId); + }); + }); +} + +function getEditorText(editor) { + return editor ? editor.getValue() : ""; +} + +function createEditors() { + const optsCommon = { + language: "plaintext", + wordWrap: "on", + scrollBeyondLastLine: false, + minimap: { enabled: false }, + fontLigatures: false, + fontSize: 14, + lineNumbers: "on", + renderLineHighlight: "line", + automaticLayout: true, + }; + + promptEditor = monaco.editor.create(promptHost, { + ...optsCommon, + readOnly: true, + theme: "vs-dark", + value: currentPromptText || "", + }); + + answerEditor = monaco.editor.create(answerHost, { + ...optsCommon, + readOnly: false, + theme: "hc-black", + value: "", + }); +} + +function disposeEditors() { + if (promptEditor) { promptEditor.dispose(); promptEditor = null; } + if (answerEditor) { answerEditor.dispose(); answerEditor = null; } +} + +/* ---------------- Modal open/close ---------------- */ +async function openTask(taskId) { + const data = await fetchJSON(`${API_BASE}/tasks/${taskId}`); + currentTaskId = data.id; + currentPromptText = data.display_prompt || ""; + + overlay.classList.remove("hidden"); + modal.classList.remove("hidden"); + + await loadMonaco(); + createEditors(); + + copyBtn.onclick = async () => { + try { + await navigator.clipboard.writeText(currentPromptText); + copyBtn.textContent = "Copied"; + setTimeout(() => (copyBtn.textContent = "Copy"), 900); + } catch (e) { + alert("Copy failed: " + e.message); + } + }; + + setTimeout(() => { answerEditor && answerEditor.focus(); }, 0); +} + +function closeModal() { + disposeEditors(); + currentTaskId = null; + currentPromptText = ""; + overlay.classList.add("hidden"); + modal.classList.add("hidden"); +} + +closeModalBtn.addEventListener("click", closeModal); +overlay.addEventListener("click", closeModal); +refreshBtn.addEventListener("click", loadTasks); + +/* ---------------- Submit ---------------- */ +completeBtn.addEventListener("click", async () => { + const answer = getEditorText(answerEditor).trim(); + if (!answer) { + alert("Answer is empty."); + return; + } + const form = new FormData(); + form.append("answer", answer); + + const r = await fetch(`${API_BASE}/tasks/${currentTaskId}/answer`, { + method: "POST", + body: form, + }); + if (!r.ok) { + alert("Submit failed: " + (await r.text())); + return; + } + closeModal(); + await loadTasks(); +}); + +/* ---------------- Init ---------------- */ +window.addEventListener("DOMContentLoaded", loadTasks); diff --git a/scripts/templates/manual_page.html b/scripts/templates/manual_page.html new file mode 100644 index 000000000..566222270 --- /dev/null +++ b/scripts/templates/manual_page.html @@ -0,0 +1,48 @@ + + + + + OpenEvolve – Manual Mode + + + + + +
+  <div class="topbar">
+    <h1>Manual Tasks</h1>
+    <button id="refreshBtn" class="ghost">Refresh</button>
+  </div>
+
+  <div class="container">
+    <div id="taskList" class="task-list"></div>
+  </div>
+
+  <div id="modalOverlay" class="overlay hidden"></div>
+  <div id="taskModal" class="modal hidden">
+    <button id="closeModal" class="icon-btn close-btn">&times;</button>
+    <div class="modal-body">
+      <div class="pane left">
+        <div class="pane-title">Prompt</div>
+        <div id="promptEditor" class="editor-host"></div>
+        <button id="copyBtn" class="copy-btn">Copy</button>
+      </div>
+      <div class="pane right">
+        <div class="pane-title">Answer</div>
+        <div id="answerEditor" class="editor-host"></div>
+        <button id="completeBtn" class="complete-btn primary">Complete</button>
+      </div>
+    </div>
+  </div>
+
+  <script src="https://cdn.jsdelivr.net/npm/monaco-editor@0.52.0/min/vs/loader.js"></script>
+  <script src="{{ url_for('static', filename='js/manual.js') }}"></script>
+</body>
+</html>
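For completeness, the queue written by `OpenAILLM` in manual mode can be serviced without the web UI at all. The sketch below is a standalone helper, not part of this patch; it assumes `llm.manual_mode` is enabled for the run and that the run's output directory is `./openevolve_output`, so tasks live in `openevolve_output/manual_tasks_queue/`. It prints each pending task's `display_prompt` and writes the matching `<task_id>.answer.json` with the same atomic tmp-then-replace pattern as `scripts/manual.py`, so the poller in `_manual_wait_for_answer` never reads a half-written file.

```python
"""Answer queued manual-mode tasks from a terminal (sketch).

Assumes the run was started with llm.manual_mode enabled and that its
output directory is ./openevolve_output; adjust QUEUE_DIR otherwise.
"""
import json
from pathlib import Path

QUEUE_DIR = Path("openevolve_output/manual_tasks_queue")  # assumed location


def pending_tasks():
    """Yield task payloads that do not have an *.answer.json yet."""
    for task_path in sorted(QUEUE_DIR.glob("*.json")):
        if task_path.name.startswith(".") or task_path.name.endswith(".answer.json"):
            continue
        if (QUEUE_DIR / f"{task_path.stem}.answer.json").exists():
            continue
        yield json.loads(task_path.read_text(encoding="utf-8"))


def write_answer(task_id: str, answer: str) -> None:
    """Write <task_id>.answer.json atomically, mirroring scripts/manual.py."""
    tmp = QUEUE_DIR / f".{task_id}.answer.json.tmp"
    out = QUEUE_DIR / f"{task_id}.answer.json"
    tmp.write_text(
        json.dumps({"id": task_id, "answer": answer}, ensure_ascii=False, indent=2),
        encoding="utf-8",
    )
    tmp.replace(out)  # OpenAILLM polls for this file and returns its "answer" field


if __name__ == "__main__":
    for task in pending_tasks():
        print(f"--- task {task['id']} ({task.get('model')}) ---")
        print(task.get("display_prompt", ""))
        reply = input("Answer (blank to skip): ").strip()
        if reply:
            write_answer(task["id"], reply)
```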
diff --git a/scripts/visualizer.py b/scripts/visualizer.py
index 98f16a974..ed520d974 100644
--- a/scripts/visualizer.py
+++ b/scripts/visualizer.py
@@ -1,10 +1,13 @@
 import os
 import json
 import glob
-import logging
 import shutil
+import logging
 import re as _re
-from flask import Flask, render_template, render_template_string, jsonify
+
+from flask import Flask, render_template, jsonify
+
+from manual import create_manual_blueprint
 
 logger = logging.getLogger(__name__)
 
@@ -113,7 +116,7 @@ def program_page(program_id):
     data = load_evolution_data(checkpoint_dir)
     program_data = next((p for p in data["nodes"] if p["id"] == program_id), None)
-    program_data = {"code": "", "prompts": {}, **program_data}
+    program_data = {"code": "", "prompts": {}, **(program_data or {})}
     artifacts_json = program_data.get("artifacts_json", None)
     return render_template(
@@ -165,10 +168,16 @@ def run_static_export(args):
         shutil.copytree(static_src, static_dst)
 
     logger.info(
-        f"Static export written to {output_dir}/\nNote: This will only work correctly with a web server, not by opening the HTML file directly in a browser. Try $ python3 -m http.server --directory {output_dir} 8080"
+        f"Static export written to {output_dir}/\n"
+        f"Note: use a web server, not file://. "
+        f"Try: python3 -m http.server --directory {output_dir} 8080"
    )
 
+# Manual mode blueprint mounted at /manual
+app.register_blueprint(create_manual_blueprint(lambda: os.environ.get("EVOLVE_OUTPUT", "examples/")))
+
+
 if __name__ == "__main__":
     import argparse
@@ -177,7 +186,7 @@ def run_static_export(args):
         "--path",
         type=str,
         default="examples/",
-        help="Path to openevolve_output or checkpoints folder",
+        help="Path to OpenEvolve run output directory (e.g. openevolve_output) or its checkpoints/checkpoint_*.",
     )
     parser.add_argument("--host", type=str, default="127.0.0.1")
     parser.add_argument("--port", type=int, default=8080)
@@ -204,7 +213,6 @@ def run_static_export(args):
     run_static_export(args)
 
     os.environ["EVOLVE_OUTPUT"] = args.path
-    logger.info(
-        f"Starting server at http://{args.host}:{args.port} with log level {args.log_level.upper()}"
-    )
+    logger.info(f"Starting server at http://{args.host}:{args.port} with log level {args.log_level.upper()}")
+    logger.info(f"Manual UI: http://{args.host}:{args.port}/manual")
     app.run(host=args.host, port=args.port, debug=True)
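The `/manual` blueprint registered above can also be driven programmatically rather than through the browser. A rough sketch using only the standard library follows; it assumes the visualizer is running locally (`python scripts/visualizer.py --path openevolve_output`) on the default `127.0.0.1:8080`. The answer is submitted as form data, which is the shape `api_task_answer` checks first (a JSON body would also work).

```python
"""Exercise the /manual HTTP API (sketch; assumes a local visualizer on port 8080)."""
import json
from urllib.parse import urlencode
from urllib.request import Request, urlopen

BASE = "http://127.0.0.1:8080/manual/api"  # assumed host/port


def get_json(url: str) -> dict:
    with urlopen(url) as resp:
        return json.loads(resp.read().decode("utf-8"))


tasks = get_json(f"{BASE}/tasks")["tasks"]  # pending tasks only
for t in tasks:
    detail = get_json(f"{BASE}/tasks/{t['id']}")  # includes display_prompt
    print(detail["display_prompt"])

    # Submit a placeholder answer as application/x-www-form-urlencoded data
    body = urlencode({"answer": "# TODO: paste the reviewed response here"}).encode()
    req = Request(f"{BASE}/tasks/{t['id']}/answer", data=body, method="POST")
    with urlopen(req) as resp:
        print(resp.status, resp.read().decode("utf-8"))
```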