@classmethod
def prompt_renderer(cls, input_dict: Mapping[str, Any]) -> str:
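    """Build the instruction-refinement prompt from `input_dict`.

    Substitutes the current instruction text and a markdown rendering of the
    feedback dataset into the template's `<curr_instructions>` and
    `<inputs_outputs_feedback>` placeholders.
    """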
    current_instruction = input_dict.get("current_instruction_doc")
    if not isinstance(current_instruction, str):
        raise TypeError("current_instruction_doc must be a string")
    dataset = input_dict.get("dataset_with_feedback")
    if not isinstance(dataset, Sequence) or isinstance(dataset, (str, bytes)):
        raise TypeError("dataset_with_feedback must be a sequence of records")
    def format_samples(samples):
        def render_value(value, level=3):
            # level controls markdown header depth (###, ####, etc.)
            if isinstance(value, dict):
                s = ""
                for k, v in value.items():
                    s += f"{'#' * level} {k}\n"
                    s += render_value(v, min(level + 1, 6))
                if not value:
                    s += "\n"
                return s
            elif isinstance(value, list | tuple):
                s = ""
                for i, item in enumerate(value):
                    s += f"{'#' * level} Item {i + 1}\n"
                    s += render_value(item, min(level + 1, 6))
                if not value:
                    s += "\n"
                return s
            else:
                return f"{str(value).strip()}\n\n"
        def convert_sample_to_markdown(sample, examplenum):
            s = f"# Example {examplenum}\n"
            for key, val in sample.items():
                s += f"## {key}\n"
                s += render_value(val, level=3)
            return s

        return "\n\n".join(convert_sample_to_markdown(sample, i + 1) for i, sample in enumerate(samples))
    prompt_template = input_dict.get("prompt_template")
    if prompt_template is None:
        prompt_template = cls.default_prompt_template
    cls.validate_prompt_template(prompt_template)
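    # Fill the placeholder markers with the current instruction and the rendered feedback examples.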
    prompt = prompt_template.replace("<curr_instructions>", current_instruction)
    prompt = prompt.replace("<inputs_outputs_feedback>", format_samples(dataset))
    return prompt