-
Notifications
You must be signed in to change notification settings - Fork 19
Expand file tree
/
Copy pathstar.py
More file actions
96 lines (86 loc) · 3.17 KB
/
star.py
File metadata and controls
96 lines (86 loc) · 3.17 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
"""Main STaR Loop"""
from copy import deepcopy
from datasets import Dataset, DatasetDict, load_dataset
from examples.star.inference import generate_predictions
from examples.star.train import train
from examples.star.utils import (
execute_tests,
format_solution,
generate_prompt,
parse_args,
)
def main() -> None:
    """Run the STaR (Self-Taught Rea­soner) loop.

    Loads a code dataset, attaches a chat-formatted ``text`` column to the
    non-train splits, then iterates: (1) sample candidate solutions from the
    current model, (2) keep only samples whose tests pass (pytest exit code
    0), (3) fine-tune the model on the verified samples. Each iteration's
    raw and verified samples are written under ``<output_dir>/data`` and the
    trained model under ``<output_dir>/models-iter<i>``.
    """
    args = parse_args()
    ds = load_dataset(args.dataset_name, args.dataset_config_name)
    assert "train" in ds
    # The validation split is reused verbatim every iteration below; fail
    # fast here rather than with a KeyError deep inside the training loop.
    assert "validation" in ds

    # Format every split except "train" for evaluation. The train split's
    # "text" column is rebuilt each iteration from verified model samples.
    for split in ds:
        texts = []
        if split == "train":
            continue
        for example in ds[split]:
            canonical_solution = f"```python\n{example['canonical_solution']}\n```"
            text = [
                {
                    "role": "user",
                    "message": generate_prompt(example["prompt"], example["test"]),
                },
                {
                    "role": "assistant",
                    "message": format_solution(canonical_solution, example["prompt"]),
                },
            ]
            texts.append(text)
        ds[split] = ds[split].add_column(name="text", column=texts)

    model_name = args.model_name_or_path
    # args.output_dir is overwritten per iteration (to point train() at the
    # iteration's model dir), so keep an untouched copy of the original.
    output_dir = deepcopy(args.output_dir)
    for i in range(args.iteration):
        # --- sample: generate candidate solutions from the current model ---
        all_samples = generate_predictions(
            model_name, ds["train"], args.temperature, args.n
        )
        # Check alignment *before* persisting, so a mismatch cannot silently
        # produce a misaligned samples file on disk.
        assert len(ds["train"]) == len(all_samples)
        ds["train"].add_column(name="sample", column=all_samples).to_json(
            f"{output_dir}/data/samples-iter{i}.json"
        )

        # --- verify: run tests; keep the first passing sample per example ---
        _traces, all_execution_results = execute_tests(
            ds["train"], all_samples, max_workers=args.max_workers
        )
        passed_examples = []
        for example, execution_results, samples in zip(
            ds["train"], all_execution_results, all_samples
        ):
            for execution_result, sample in zip(execution_results, samples):
                # pytest exit code 0 means all tests passed:
                # https://docs.pytest.org/en/stable/reference/exit-codes.html
                if execution_result == 0:
                    example["text"] = [
                        {
                            "role": "user",
                            "message": generate_prompt(
                                example["prompt"], example["test"]
                            ),
                        },
                        {
                            "role": "assistant",
                            "message": format_solution(sample, example["prompt"]),
                        },
                    ]
                    passed_examples.append(example)
                    break
        raw_datasets = DatasetDict(
            {
                "train": Dataset.from_list(passed_examples),
                "validation": ds["validation"],
            }
        )
        raw_datasets["train"].to_json(
            f"{output_dir}/data/verified-samples-iter{i}.json"
        )

        # --- train on the verified set; the next iteration samples from it ---
        args.output_dir = f"{output_dir}/models-iter{i}"
        train(raw_datasets, model_name, args)
        model_name = args.output_dir
# Standard script entry guard: run the STaR loop only when executed directly.
if __name__ == "__main__":
    main()
# No public API: this module is a script, not a library.
__all__ = []