Feature method run multi prompts #33

Merged: 8 commits, Feb 1, 2024
14 changes: 14 additions & 0 deletions data/autora/prompts/all_prompt.json
@@ -0,0 +1,14 @@
[
{
"SYS": "You are a technical documentation writer. You always write clear, concise, and accurate documentation for\nscientific experiments. Your documentation focuses on the experiment's purpose, procedure, and results. Therefore,\ndetails about specific python functions, packages, or libraries are not necessary. Your readers are experimental\nscientists.",
"INSTR": "Please generate high-level one or two paragraph documentation for the following experiment."
},
{
"SYS": "You are a technical documentation writer. You always write clear, concise, and accurate documentation\nfor scientific experiments. Your documentation focuses on the experiment's procedure. Therefore, details about specific\npython functions, packages, or libraries are NOT necessary. Your readers are experimental scientists.\nFor writing your descriptions, follow these instructions:\n- DO NOT write greetings or preambles\n- Use the Variable 'name' attribute and not the python variable names\n- Use LaTeX for math expressions\n- DO NOT include code or code-like syntax and do not use python function or class names\n- Write in paragraph style, NOT bullet points",
"INSTR": "Generate a one line description of the dependent and independent variables used in the following\npython code: "
},
{
"SYS": "You are a research scientist. You always write clear, concise, and accurate documentation\nfor scientific experiments from python code. Your documentation focuses on the experiment's procedure. Therefore, details about specific\npython functions, packages, or libraries are NOT necessary. Your readers are experimental scientists.\nFor writing your descriptions, follow these instructions:\n- DO NOT write greetings or preambles\n- Use the Variable 'name' attribute and not the python variable names\n- Use LaTeX for math expressions\n- DO NOT include code or code-like syntax and do not use python function or class names\n- Write in paragraph style, NOT bullet points",
"INSTR": "Generate a three line description of the dependent and independent variables used in the following\npython code: "
}
]
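The three SYS/INSTR entries above are consumed by the new get_prompts_from_file helper added in src/autora/doc/util.py below, which turns each pair into a single built prompt via PromptBuilder. A minimal usage sketch, assuming a repository-relative path to this file:

from autora.doc.util import get_prompts_from_file

# Each SYS/INSTR pair in all_prompt.json is combined into one prompt string by PromptBuilder.
prompts = get_prompts_from_file("data/autora/prompts/all_prompt.json")
assert len(prompts) == 3  # one built prompt per entry in the JSON list above
for prompt in prompts:
    print(prompt[:80])  # preview the start of each built prompt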
42 changes: 41 additions & 1 deletion src/autora/doc/pipelines/main.py
@@ -10,7 +10,8 @@
from nltk.translate.meteor_score import single_meteor_score

from autora.doc.runtime.predict_hf import Predictor
from autora.doc.runtime.prompts import PROMPTS, PromptIds
from autora.doc.runtime.prompts import PROMPTS, PromptBuilder, PromptIds
from autora.doc.util import get_eval_result_from_prediction, get_prompts_from_file, load_file

app = typer.Typer()
logging.basicConfig(
@@ -47,6 +48,45 @@ def evaluate_documentation(predictions: List[str], references: List[str]) -> Tup
return (bleu, meteor)


@app.command(help="Evaluate a model for code-to-documentation generation for all prompts in the prompts_file")
def eval_prompts(
Review comment (Collaborator): I was about to ask you to add a doc-comment for this function, in particular because it's hard to tell what the List[Dict[str, str]] will contain. But I think a better option is to create a type (a dataclass?) for the return type, e.g. an EvalResult class. (A possible shape for such a class is sketched after this function.)

data_file: str = typer.Argument(..., help="JSONL Data file to evaluate on"),
model_path: str = typer.Option("meta-llama/Llama-2-7b-chat-hf", help="Path to HF model"),
prompts_file: str = typer.Argument(..., help="JSON file with a list of dictionary of prompts"),
param: List[str] = typer.Option(
[], help="Additional float parameters to pass to the model as name=float pairs"
),
) -> List[Dict[str, str]]:
import mlflow

results_list = []

mlflow.autolog()
param_dict = {pair[0]: float(pair[1]) for pair in [pair.split("=") for pair in param]}
run = mlflow.active_run()

prompts_list = get_prompts_from_file(prompts_file)

if run is None:
run = mlflow.start_run()
with run:
logger.info(f"Active run_id: {run.info.run_id}")
logger.info(f"running predict with {data_file}")
logger.info(f"model path: {model_path}")
mlflow.log_params(param_dict)
mlflow.log_param("model_path", model_path)
mlflow.log_param("data_file", data_file)
mlflow.log_param("prompts_file", prompts_file)
predictor = Predictor(model_path)
for i in range(len(prompts_list)):
logger.info(f"Starting to run model on prompt {i}")
prediction_with_scores = eval_prompt(data_file, predictor, prompts_list[i], param_dict)
logger.info(f"Model run completed on prompt {i}: {prompts_list[i]}")
eval_result = get_eval_result_from_prediction(prediction_with_scores, prompts_list[i])
results_list.append(eval_result)
return results_list
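Following up on the review comment above about returning a strongly typed result: a minimal sketch of what an EvalResult dataclass could look like. This is an assumption rather than code from this PR; the field names simply mirror the dict built by get_eval_result_from_prediction in src/autora/doc/util.py below.

from dataclasses import dataclass
from typing import List

@dataclass
class EvalResult:
    # Hypothetical type suggested in the review comment; not part of this PR.
    prediction: List[str]  # model outputs for the records in the data file
    bleu: float            # BLEU score against the reference documentation
    meteor: float          # METEOR score against the reference documentation
    prompt: str            # the built prompt string used for this run

With such a type, eval_prompts could return List[EvalResult] instead of List[Dict[str, str]].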


@app.command(help="Evaluate model on a data file")
def eval(
data_file: str = typer.Argument(..., help="JSONL Data file to evaluate on"),
29 changes: 29 additions & 0 deletions src/autora/doc/util.py
@@ -0,0 +1,29 @@
import json
from typing import Any, Dict, List, Tuple

from autora.doc.runtime.prompts import PromptBuilder


def load_file(json_file_path: str) -> List[Dict[str, Any]]:
# Read and parse the JSON file
with open(json_file_path, "r") as file:
data: List[Dict[str, Any]] = json.load(file)
return data


def get_prompts_from_file(prompts_file: str) -> List[str]:
prompts_data = load_file(prompts_file)
prompts_list = [PromptBuilder(p["SYS"], p["INSTR"]).build() for p in prompts_data]
return prompts_list


def get_eval_result_from_prediction(
prediction: Tuple[List[str], float, float], prompt: str
) -> Dict[str, Any]:
eval_result = {
Review comment (Collaborator): See the comment above; it would be good to make this strongly typed. (A typed variant of this helper is sketched below.)

"prediction": prediction[0],
"bleu": prediction[1],
"meteor": prediction[2],
"prompt": prompt,
}
return eval_result
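If the EvalResult dataclass sketched after eval_prompts were adopted (again hypothetical, not part of this PR), this helper could return the typed value directly instead of an untyped dict:

from typing import List, Tuple

def get_eval_result_from_prediction(
    prediction: Tuple[List[str], float, float], prompt: str
) -> EvalResult:  # assumes the EvalResult dataclass sketched above is importable
    # Unpack the (predictions, bleu, meteor) tuple and build the typed result.
    predictions, bleu, meteor = prediction
    return EvalResult(prediction=predictions, bleu=bleu, meteor=meteor, prompt=prompt)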
12 changes: 11 additions & 1 deletion tests/test_main.py
@@ -1,9 +1,10 @@
from pathlib import Path
from typing import Dict, List

import jsonlines
import pytest

from autora.doc.pipelines.main import eval, evaluate_documentation, generate, import_data
from autora.doc.pipelines.main import eval, eval_prompts, evaluate_documentation, generate, import_data
from autora.doc.runtime.prompts import PromptIds

# dummy HF model for testing
@@ -84,3 +85,12 @@ def test_import(tmp_path: Path) -> None:
import_data(str(code), str(text), str(data))
new_lines = data.read_text().splitlines()
assert len(new_lines) == 1, "Expected one new line"


def test_eval_prompts() -> None:
data_file = Path(__file__).parent.joinpath("../data/sweetpea/data.jsonl").resolve()
prompts_file = Path(__file__).parent.joinpath("../data/autora/prompts/all_prompt.json").resolve()
outputs: List[Dict[str, str]] = eval_prompts(str(data_file), TEST_HF_MODEL, str(prompts_file), [])
assert len(outputs) == 3, "Expected 3 outputs"
for output in outputs:
assert len(output) > 0, "Expected non-empty output"
32 changes: 32 additions & 0 deletions tests/test_util.py
@@ -0,0 +1,32 @@
from pathlib import Path

from autora.doc.util import get_eval_result_from_prediction, get_prompts_from_file, load_file


def test_load_file() -> None:
prompts_file_path = Path(__file__).parent.joinpath("../data/autora/prompts/all_prompt.json").resolve()
data = load_file(str(prompts_file_path))
assert type(data) == list


def test_get_prompts_from_file() -> None:
prompts_file_path = Path(__file__).parent.joinpath("../data/autora/prompts/all_prompt.json").resolve()
prompts_list = get_prompts_from_file(str(prompts_file_path))

assert len(prompts_list) == 3, "Expected 3 outputs"
for prompt in prompts_list:
assert type(prompt) == str


def test_get_eval_result_from_prediction() -> None:
prediction = (["response1", "response2"], 0.8, 0.7)
prompt = "prompt1"
result = get_eval_result_from_prediction(prediction, prompt)
expected_result = {
"prediction": ["response1", "response2"],
"bleu": 0.8,
"meteor": 0.7,
"prompt": "prompt1",
}
assert type(result) == dict # Assert result is a dictionary
assert result == expected_result # Assert specific keys and values