TerminalBenchAdapter

gepa.adapters.terminal_bench_adapter.terminal_bench_adapter.TerminusAdapter(n_concurrent: int = 6, instruction_prompt_path: str = 'prompt-templates/instruction_prompt.txt')

Bases: GEPAAdapter

Source code in gepa/adapters/terminal_bench_adapter/terminal_bench_adapter.py
def __init__(
    self,
    n_concurrent: int = 6,
    instruction_prompt_path: str = "prompt-templates/instruction_prompt.txt",
):
    self.n_concurrent = n_concurrent
    self.instruction_prompt_path = instruction_prompt_path
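
For orientation, the sketch below constructs the adapter and hands it to GEPA's optimizer. It is a minimal sketch, not code from this module: it assumes TerminalBenchTask is a simple record with task_id and model_name fields (as used by evaluate below) and that gepa.optimize accepts a seed candidate keyed by "instruction_prompt"; the task id, model names, and exact keyword arguments are placeholders and may differ between versions.

import gepa
from gepa.adapters.terminal_bench_adapter.terminal_bench_adapter import (
    TerminalBenchTask,
    TerminusAdapter,
)

# Hypothetical training tasks; task_id and model_name values are placeholders.
trainset = [TerminalBenchTask(task_id="hello-world", model_name="openai/gpt-4o-mini")]

adapter = TerminusAdapter(
    n_concurrent=4,
    instruction_prompt_path="prompt-templates/instruction_prompt.txt",
)

# Assumed gepa.optimize signature; check the installed version for the exact arguments.
result = gepa.optimize(
    seed_candidate={"instruction_prompt": "Solve the terminal task step by step."},
    trainset=trainset,
    adapter=adapter,
    reflection_lm="openai/gpt-4o",  # placeholder reflection model
    max_metric_calls=50,
)
print(result.best_candidate["instruction_prompt"])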

Attributes

n_concurrent = n_concurrent instance-attribute

instruction_prompt_path = instruction_prompt_path instance-attribute

propose_new_texts: ProposalFn | None = None class-attribute instance-attribute

Functions

evaluate(batch: list[TerminalBenchTask], candidate: dict[str, str], capture_traces: bool = False) -> EvaluationBatch

Source code in gepa/adapters/terminal_bench_adapter/terminal_bench_adapter.py
def evaluate(
    self,
    batch: list[TerminalBenchTask],
    candidate: dict[str, str],
    capture_traces: bool = False,
) -> EvaluationBatch:
    outputs = []
    scores = []
    trajectories = []
    example_run_id = "temp_gepa_run" + "_" + datetime.now().strftime("%Y%m%d%H%M%S")
    example_model_name = batch[0].model_name

    run_agent_tb(
        [task.task_id for task in batch],
        example_run_id,
        example_model_name,
        instruction_prompt=candidate["instruction_prompt"],
        n_concurrent=self.n_concurrent,
        prompt_template_path=self.instruction_prompt_path,
    )

    for example in batch:
        try:
            success, score, failed_reason, messages = get_results(example.task_id, example_run_id)
        except Exception as e:
            print(f"Error running example {example.task_id} {example_run_id}: {e}")
            success = False
            score = 0
            failed_reason = str(e)
            messages = []

        outputs.append(
            f"Terminal Bench outputs are omitted. Please see runs/{example_run_id}/{example.task_id}/ for detailed logging."
        )
        scores.append(score)
        trajectories.append(
            {
                "messages": messages,
                "instruction_prompt": candidate["instruction_prompt"],
                "failed_reason": failed_reason,
                "success": success,
            }
        )
    return EvaluationBatch(
        outputs=outputs,
        scores=scores,
        trajectories=trajectories,
    )
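
evaluate runs the Terminus agent over the whole batch with run_agent_tb, then collects a score and a trajectory (message history, instruction prompt, failure reason, success flag) for each task. The sketch below calls it directly with placeholder tasks and a placeholder candidate; only the keys visible in the source above are relied upon.

# Minimal sketch of a direct evaluate call; the tasks and candidate are placeholders.
adapter = TerminusAdapter(n_concurrent=2)
batch = [TerminalBenchTask(task_id="hello-world", model_name="openai/gpt-4o-mini")]
candidate = {"instruction_prompt": "Solve the terminal task step by step."}

eval_batch = adapter.evaluate(batch, candidate, capture_traces=True)

# outputs, scores, and trajectories are aligned per task in the batch.
for score, trajectory in zip(eval_batch.scores, eval_batch.trajectories):
    print(score, trajectory["success"], trajectory["failed_reason"])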

make_reflective_dataset(candidate: dict[str, str], eval_batch: EvaluationBatch, components_to_update: list[str])

Source code in gepa/adapters/terminal_bench_adapter/terminal_bench_adapter.py
def make_reflective_dataset(
    self,
    candidate: dict[str, str],
    eval_batch: EvaluationBatch,
    components_to_update: list[str],
):
    reflective_dataset = {"instruction_prompt": []}
    for _score, trajectory in zip(eval_batch.scores, eval_batch.trajectories, strict=False):
        if trajectory["success"]:
            feedback = "Successfully solved the task!"
        else:
            feedback = f"Failed to solve the task. Reason: {trajectory['failed_reason']}"
        reflective_dataset["instruction_prompt"].append(
            {
                "Message History": trajectory["messages"],
                "Instruction Prompt": candidate["instruction_prompt"],
                "Feedback": feedback,
            }
        )
    return reflective_dataset
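
make_reflective_dataset repackages each trajectory into a record under the "instruction_prompt" component, pairing the message history and the current prompt with success/failure feedback. Continuing the sketch above, the commented shape shows what the returned dictionary looks like; the values are illustrative, not real Terminal-Bench output.

reflective = adapter.make_reflective_dataset(
    candidate=candidate,
    eval_batch=eval_batch,
    components_to_update=["instruction_prompt"],
)

# reflective == {
#     "instruction_prompt": [
#         {
#             "Message History": [...],      # agent/LLM messages for the task
#             "Instruction Prompt": "...",   # the candidate prompt being evaluated
#             "Feedback": "Failed to solve the task. Reason: ...",
#         },
#         ...
#     ]
# }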